"""Convert T5X checkpoints of T5/LongT5 models to Flax."""

import argparse

from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)

    split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with"
            " `encoder_attention_type` attribute with a value from ['local', 'transient-global']."
        )

    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{layer_index}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value

        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"]["weight"] = (
                t5x_global_layer_norm
            )

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0:
    t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
        "embedding"
    ] = t5x_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["global_relative_attention_bias"][
            "embedding"
        ] = t5x_encoder_global_rel_embedding

    # Assigning
    t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{layer_index}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value

        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value

        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm

    # Only for layer 0:
    t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = t5x_decoder_rel_embedding

    # Token Embeddings
    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = t5x_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
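A minimal sketch of how the script above would be invoked; the checkpoint path and dump folder are hypothetical placeholders, and `google/long-t5-local-base` is an existing config name used only for illustration:

    python convert_t5x_checkpoint_to_flax.py \
        --t5x_checkpoint_path /path/to/t5x_checkpoint \
        --config_name google/long-t5-local-base \
        --flax_dump_folder_path /path/to/flax_dump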
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    """
    Output of decoding method.

    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Decoded output sample of the model.
    """

    sample: torch.FloatTensor


class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(
            in_channels,
            block_out_channels[0],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)

        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels,
            block_out_channels[-1],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class VectorQuantizer(nn.Module):
    """
    Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly avoids costly matrix
    multiplications and allows for post-hoc remapping of indices.
    """

    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q


class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator=None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
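A minimal sketch (not part of the original module) of how `DiagonalGaussianDistribution` is used: `parameters` packs mean and log-variance along the channel dimension, so an 8-channel input parameterizes a distribution over 4-channel latents.

    import torch

    params = torch.randn(1, 8, 4, 4)   # [mean | logvar] concatenated along dim=1
    dist = DiagonalGaussianDistribution(params)
    latent = dist.sample()             # reparameterized sample, shape (1, 4, 4, 4)
    kl = dist.kl()                     # KL divergence to a standard normal, shape (1,)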
"""Image processor class for LeViT."""

from typing import Dict, Iterable, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    r"""
    Constructs a LeViT image processor.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image. If size contains the key "shortest_edge", the shortest edge is resized to
        `int(size["shortest_edge"] * (256 / 224))`, keeping the aspect ratio; otherwise the image is
        resized to `(size["height"], size["width"])`.
        """
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Center crop an image to `(size["height"], size["width"])`.
        """
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Rescale an image by a scale factor: image = image * scale.
        """
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Normalize an image: image = (image - image_mean) / image_std.
        """
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """
        Preprocess an image or batch of images to be used as input to a LeViT model.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
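A minimal usage sketch, assuming the publicly available `facebook/levit-128S` checkpoint (an assumption for illustration, not referenced by this file):

    from PIL import Image

    processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
    image = Image.new("RGB", (640, 480))
    pixel_values = processor(image, return_tensors="pt").pixel_values  # shape (1, 3, 224, 224)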
"""Convert mLUKE checkpoint."""

import argparse
import json
import os
from collections import OrderedDict

import torch

from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor(
            [[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]]
        )

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
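The entity vocabulary consumed by `load_original_entity_vocab` above is a JSON-lines file; a hypothetical two-entry example and the mapping it yields, following the parsing logic exactly:

    {"id": 0, "entities": [["[MASK]", "en"]]}
    {"id": 1, "entities": [["Japan", "en"], ["日本", "ja"]]}

    # resulting mapping: {"[MASK]": 0, "en:Japan": 1, "ja:日本": 1}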
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available


_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]


if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
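Because of the `_LazyModule` indirection above, importing this package is cheap; the torch-backed classes listed in `_import_structure` are only materialized on first attribute access, e.g.:

    from transformers.models.audio_spectrogram_transformer import ASTConfig  # does not pull in torch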
"""Audio/Text processor class for CLAP."""

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    r"""
    Constructs a CLAP processor which wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor.
    """

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """
        Forwards the `text` argument to the tokenizer and the `audios` argument to the feature extractor.
        """
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """
        Forwards all arguments to the tokenizer's `batch_decode`.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        Forwards all arguments to the tokenizer's `decode`.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
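A minimal usage sketch, assuming the `laion/clap-htsat-unfused` checkpoint (an assumption for illustration, not referenced by this file):

    import numpy as np
    from transformers import ClapProcessor

    processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
    audio = np.random.randn(48000).astype("float32")
    inputs = processor(text=["a dog barking"], audios=audio, sampling_rate=48000, return_tensors="pt")
    # `inputs` now carries both `input_ids` (from the tokenizer) and `input_features` (from the feature extractor)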
"""
Prim's Algorithm.

Determines the minimum spanning tree (MST) of a graph using Prim's algorithm.

Details: https://en.wikipedia.org/wiki/Prim%27s_algorithm
"""

import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    """Class Vertex."""

    def __init__(self, id_):
        """
        Arguments:
            id_ - an id to identify the vertex
        Attributes:
            neighbors - a list of the vertices it is linked to
            edges     - a dict mapping neighbor id to edge weight
        """
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        """Comparison rule for the < operator."""
        return self.key < other.key

    def __repr__(self) -> str:
        """Return the vertex id."""
        return self.id

    def add_neighbor(self, vertex):
        """Add a pointer to a vertex in the neighbors list."""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        """Record the weight of the edge to a destination vertex."""
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """
    Prim's algorithm with a linear scan for the minimum-key vertex.
    Runtime: O(mn) with `m` edges and `n` vertices.
    Returns a list with the edges of a minimum spanning tree.
    """
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """
    Prim's algorithm using a binary heap for the minimum-key vertex.
    Yields the edges of a minimum spanning tree.
    """
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
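A small worked example (not part of the original module): build a 5-vertex graph and extract its minimum spanning tree with `prim`. `connect` takes 1-based vertex indices into the list, and the returned pairs are (vertex, parent) in the same 1-based numbering.

    graph = [Vertex(i) for i in range(5)]
    connect(graph, 1, 2, 3)   # edge of weight 3 between vertices 1 and 2
    connect(graph, 1, 3, 1)
    connect(graph, 2, 3, 7)
    connect(graph, 3, 4, 5)
    connect(graph, 4, 5, 2)
    print(prim(graph[:], graph[0]))  # [(2, 1), (3, 1), (4, 3), (5, 4)]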
"""
PyTest checks for the digital_image_processing package.
"""

import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array with the same height and width as the read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
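These checks are written for pytest and assume the repository's `image_data` samples are present; a typical invocation from the repository root would be:

    python -m pytest digital_image_processing/test_digital_image_processing.py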
"""UperNet model configuration."""

import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of an UperNet semantic segmentation model.
    """

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary.
        """
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
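A minimal sketch of instantiating the config; pairing it with a ConvNext backbone and 150 labels is an illustrative assumption, not something this file prescribes:

    from transformers import ConvNextConfig, UperNetConfig

    backbone_config = ConvNextConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
    config = UperNetConfig(backbone_config=backbone_config, num_labels=150)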
def manhattan_distance(point_a: list, point_b: list) -> float:
    """
    Expects two lists of numbers of the same length and returns the
    Manhattan (taxicab) distance between them.
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """
    Same as manhattan_distance, written as a near one-liner.
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
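A worked example: for the points (1, 1) and (9, 9) each coordinate differs by 8, so the distance is |9 - 1| + |9 - 1| = 16.

    >>> manhattan_distance([1, 1], [9, 9])
    16.0
    >>> manhattan_distance_one_liner([1, 1], [9, 9])
    16.0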
import unittest

import torch

from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
"""Feature extractor class for BEiT."""

import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu


filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
def is_palindrome(num: int) -> bool:
    """
    Returns whether `num` reads the same forwards and backwards.
    Negative numbers are never palindromes.
    """
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
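Quick sanity checks: 121 reads the same in reverse, 123 does not, and negative numbers are rejected outright.

    >>> is_palindrome(121)
    True
    >>> is_palindrome(123)
    False
    >>> is_palindrome(-121)
    False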
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple

import yaml


class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Loads the dataset metadata from its dataset card (README.md)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Loads the dataset metadata from a YAML string."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}

        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")


known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}


if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
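A minimal round-trip sketch (the metadata content is a made-up example): `DatasetMetadata` is a plain dict whose YAML helpers rename the dashed fields listed in `_FIELDS_WITH_DASHES` in both directions.

    metadata = DatasetMetadata.from_yaml_string("language:\n- en\nlicense: mit\n")
    print(metadata.to_yaml_string())
    # language:
    # - en
    # license: mit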
'''simple docstring'''
__lowerCamelCase = range(2, 20 + 1)
__lowerCamelCase = [10**k for k in range(ks[-1] + 1)]
__lowerCamelCase = {}
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Tuple:
A_ = sum(a_i[j] for j in range(UpperCAmelCase__, len(UpperCAmelCase__ ) ) )
A_ = sum(a_i[j] * base[j] for j in range(min(len(UpperCAmelCase__ ), UpperCAmelCase__ ) ) )
A_ , A_ = 0, 0
A_ = n - i
A_ = memo.get(UpperCAmelCase__ )
if sub_memo is not None:
A_ = sub_memo.get(UpperCAmelCase__ )
if jumps is not None and len(UpperCAmelCase__ ) > 0:
# find and make the largest jump without going over
A_ = -1
for _k in range(len(UpperCAmelCase__ ) - 1, -1, -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
A_ = _k
break
if max_jump >= 0:
A_ , A_ , A_ = jumps[max_jump]
# since the difference between jumps is cached, add c
A_ = diff + c
for j in range(min(UpperCAmelCase__, len(UpperCAmelCase__ ) ) ):
A_ , A_ = divmod(UpperCAmelCase__, 10 )
if new_c > 0:
add(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ = []
else:
A_ = {c: []}
A_ = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
A_ , A_ = next_term(UpperCAmelCase__, k - 1, i + dn, UpperCAmelCase__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
A_ , A_ = compute(UpperCAmelCase__, UpperCAmelCase__, i + dn, UpperCAmelCase__ )
diff += _diff
dn += terms_jumped
A_ = sub_memo[c]
# keep jumps sorted by # of terms skipped
A_ = 0
while j < len(UpperCAmelCase__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(UpperCAmelCase__, (diff, dn, k) )
return (diff, dn)
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> int:
if i >= n:
return 0, i
if k > len(UpperCAmelCase__ ):
a_i.extend([0 for _ in range(k - len(UpperCAmelCase__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
A_ = i
A_ , A_ , A_ = 0, 0, 0
for j in range(len(UpperCAmelCase__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
A_ = ds_c + ds_b
diff += addend
A_ = 0
for j in range(UpperCAmelCase__ ):
A_ = a_i[j] + addend
A_ , A_ = divmod(UpperCAmelCase__, 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
return diff, i - start_i
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> str:
for j in range(UpperCAmelCase__, len(UpperCAmelCase__ ) ):
A_ = digits[j] + addend
if s >= 10:
A_ , A_ = divmod(UpperCAmelCase__, 10 )
A_ = addend // 10 + quotient
else:
A_ = s
A_ = addend // 10
if addend == 0:
break
while addend > 0:
A_ , A_ = divmod(UpperCAmelCase__, 10 )
digits.append(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ = 10**15 ) -> int:
A_ = [1]
A_ = 1
A_ = 0
while True:
A_ , A_ = next_term(UpperCAmelCase__, 20, i + dn, UpperCAmelCase__ )
dn += terms_jumped
if dn == n - i:
break
A_ = 0
for j in range(len(UpperCAmelCase__ ) ):
a_n += digits[j] * 10**j
return a_n
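# A minimal brute-force sketch of the sequence the helpers above accelerate
# (assumption: the first term is 1 and each next term adds the digit sum of the
# current term). It is standalone on purpose and does not call the functions above.
def _naive_term(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a
assert _naive_term(1) == 1 and _naive_term(3) == 4  # terms: 1, 2, 4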
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_: Optional[int] = logging.get_logger(__name__)
lowerCAmelCase_: Dict = "▁"
lowerCAmelCase_: str = {"vocab_file": "spiece.model"}
lowerCAmelCase_: Union[str, Any] = {
"vocab_file": {
"google/reformer-crime-and-punishment": (
"https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
)
}
}
lowerCAmelCase_: Any = {
"google/reformer-crime-and-punishment": 5_2_4_2_8_8,
}
class a__ ( _a ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["input_ids", "attention_mask"]
def __init__( self, _UpperCAmelCase, _UpperCAmelCase="</s>", _UpperCAmelCase="<unk>", _UpperCAmelCase=[], _UpperCAmelCase = None, **_UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_UpperCAmelCase, unk_token=_UpperCAmelCase, additional_special_tokens=_UpperCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **_UpperCAmelCase, )
lowercase__ = vocab_file
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCAmelCase )
@property
def snake_case__ ( self ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
lowercase__ = self.__dict__.copy()
lowercase__ = None
return state
def __setstate__( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = d
# for backward compatibility
if not hasattr(self, "sp_model_kwargs" ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
return self.sp_model.encode(_UpperCAmelCase, out_type=_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
return self.sp_model.piece_to_id(_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
if index < self.sp_model.get_piece_size():
lowercase__ = self.sp_model.IdToPiece(_UpperCAmelCase )
return token
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
lowercase__ = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_UpperCAmelCase ) + token
lowercase__ = []
else:
current_sub_tokens.append(_UpperCAmelCase )
out_string += self.sp_model.decode(_UpperCAmelCase )
return out_string.strip()
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = None ):
'''simple docstring'''
if not os.path.isdir(_UpperCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase__ = os.path.join(
_UpperCAmelCase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase, "wb" ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
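# Usage sketch, left as comments (network access and the Hub checkpoint named in the
# vocabulary map above are assumed):
#   tokenizer = a__.from_pretrained("google/reformer-crime-and-punishment")
#   ids = tokenizer("Crime and Punishment")["input_ids"]
#   text = tokenizer.decode(ids)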
| 668 |
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class a__ ( unittest.TestCase ):
snake_case_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = hf_hub_download(
repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" )
lowercase__ = VideoClassificationPipeline(model=_UpperCAmelCase, image_processor=_UpperCAmelCase, top_k=2 )
lowercase__ = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
for example in examples:
lowercase__ = video_classifier(_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase, [
{"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )},
{"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )},
], )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
lowercase__ = VideoMAEFeatureExtractor(
size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10} )
lowercase__ = pipeline(
"video-classification", model=_UpperCAmelCase, feature_extractor=_UpperCAmelCase, frame_sampling_rate=4 )
lowercase__ = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" )
lowercase__ = video_classifier(_UpperCAmelCase, top_k=2 )
self.assertEqual(
nested_simplify(_UpperCAmelCase, decimals=4 ), [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], )
lowercase__ = video_classifier(
[
video_file_path,
video_file_path,
], top_k=2, )
self.assertEqual(
nested_simplify(_UpperCAmelCase, decimals=4 ), [
[{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
[{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
], )
@require_tf
def snake_case__ ( self ):
'''simple docstring'''
pass
| 668 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def __a ( A , A ):
'''simple docstring'''
lowercase__ = u
for i in range(1 , A ):
lowercase__ = temp * (u - i)
return temp
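# Worked check of the factor computed above: for u = 1.5 and p = 3 the running product
# is u * (u - 1) * (u - 2) = 1.5 * 0.5 * (-0.5) = -0.375, the coefficient that (divided
# by 3!) multiplies the third forward difference in Newton's forward formula.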
def __a ( ):
'''simple docstring'''
lowercase__ = int(input("enter the numbers of values: " ) )
lowercase__ = []
for _ in range(A ):
y.append([] )
for i in range(A ):
for j in range(A ):
y[i].append(A )
lowercase__ = 0
print("enter the values of parameters in a list: " )
lowercase__ = list(map(A , input().split() ) )
print("enter the values of corresponding parameters: " )
for i in range(A ):
lowercase__ = float(input() )
lowercase__ = int(input("enter the value to interpolate: " ) )
lowercase__ = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , A ):
for j in range(n - i ):
lowercase__ = y[j + 1][i - 1] - y[j][i - 1]
lowercase__ = y[0][0]
for i in range(1 , A ):
summ += (ucal(A , A ) * y[0][i]) / math.factorial(A )
print(f'''the value at {value} is {summ}''' )
if __name__ == "__main__":
main()
| 668 |
"""simple docstring"""
import itertools
import math
def __a ( A ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers and all multiples of 3 are not prime
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
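# Standalone sanity check of the 6k +/- 1 filter used above: every prime greater
# than 3 leaves remainder 1 or 5 modulo 6.
assert all(p % 6 in (1, 5) for p in (5, 7, 11, 13, 10_007))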
def __a ( ):
'''simple docstring'''
lowercase__ = 2
while True:
if is_prime(A ):
yield num
num += 1
def __a ( A = 1_00_01 ):
'''simple docstring'''
return next(itertools.islice(prime_generator() , nth - 1 , A ) )
if __name__ == "__main__":
print(F'{solution() = }')
| 668 | 1 |
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def __a ( A ):
'''simple docstring'''
lowercase__ = checkpoints.load_tax_checkpoint(A )
lowercase__ = flatten_dict(A )
return flax_params
def __a ( A ):
'''simple docstring'''
lowercase__ = {}
lowercase__ = {
"token_embedder": "embeddings",
"encoder_norm": "layernorm",
"kernel": "weight",
".out": ".output",
"scale": "weight",
"embedders_0.pos_embedding": "row_embedder.weight",
"embedders_1.pos_embedding": "column_embedder.weight",
}
lowercase__ = {
"query": "attention.query",
"key": "attention.key",
"value": "attention.value",
"output.dense": "output",
"encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
"pre_self_attention_layer_norm": "self_attention.layer_norm",
"pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
"mlp.": "mlp.DenseReluDense.",
"pre_mlp_layer_norm": "mlp.layer_norm",
"self_attention.o": "self_attention.attention.o",
"decoder.embeddings.embedding": "decoder.embed_tokens.weight",
"decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
"decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
"decoder.logits_dense.weight": "decoder.lm_head.weight",
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
lowercase__ = ".".join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
lowercase__ = new_key.replace(A , A )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
lowercase__ = new_key.replace(A , A )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
lowercase__ = re.sub(r"layers_(\d+)" , r"layer.\1" , A )
lowercase__ = new_key.replace("encoder" , "encoder.encoder" )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
lowercase__ = re.sub(r"layers_(\d+)" , r"layer.\1" , A )
lowercase__ = flax_dict[key]
lowercase__ = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
lowercase__ = torch.from_numpy(converted_dict[key].T )
else:
lowercase__ = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def __a ( A , A , A=False , A=False ):
'''simple docstring'''
lowercase__ = get_flax_param(A )
if not use_large:
lowercase__ = PixaStructVisionConfig()
lowercase__ = PixaStructTextConfig()
else:
lowercase__ = PixaStructVisionConfig(
hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 )
lowercase__ = PixaStructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 )
lowercase__ = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=A )
lowercase__ = PixaStructForConditionalGeneration(A )
lowercase__ = rename_and_convert_flax_params(A )
model.load_state_dict(A )
lowercase__ = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer" )
lowercase__ = PixaStructImageProcessor()
lowercase__ = PixaStructProcessor(image_processor=A , tokenizer=A )
if use_large:
lowercase__ = 40_96
lowercase__ = True
# mkdir if needed
os.makedirs(A , exist_ok=A )
model.save_pretrained(A )
processor.save_pretrained(A )
print("Model saved in {}".format(A ) )
if __name__ == "__main__":
lowerCAmelCase_: List[str] = argparse.ArgumentParser()
parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--use_large", action="store_true", help="Use large model.")
parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
lowerCAmelCase_: int = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
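# Invocation sketch (script name and paths are placeholders; flags per the parser above):
#   python convert_pix2struct_checkpoint.py \
#       --t5x_checkpoint_path /path/to/t5x/ckpt \
#       --pytorch_dump_folder_path ./pix2struct-dump --use_large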
| 668 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class a__ ( _a ):
def __init__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, _UpperCAmelCase = None, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
_UpperCAmelCase, split=_UpperCAmelCase, features=_UpperCAmelCase, cache_dir=_UpperCAmelCase, keep_in_memory=_UpperCAmelCase, streaming=_UpperCAmelCase, num_proc=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = path_or_paths if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else {self.split: path_or_paths}
lowercase__ = Text(
cache_dir=_UpperCAmelCase, data_files=_UpperCAmelCase, features=_UpperCAmelCase, **_UpperCAmelCase, )
def snake_case__ ( self ):
'''simple docstring'''
if self.streaming:
lowercase__ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = None
self.builder.download_and_prepare(
download_config=_UpperCAmelCase, download_mode=_UpperCAmelCase, verification_mode=_UpperCAmelCase, base_path=_UpperCAmelCase, num_proc=self.num_proc, )
lowercase__ = self.builder.as_dataset(
split=self.split, verification_mode=_UpperCAmelCase, in_memory=self.keep_in_memory )
return dataset
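# Usage sketch, left as comments (the file name is hypothetical; `snake_case__` is the
# read method defined last above):
#   ds = a__("my_corpus.txt", split="train").snake_case__()
# With streaming=True the same call returns a streaming dataset instead of a
# materialized one, per the branch at the top of that method.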
| 668 | 1 |
"""simple docstring"""
def __a ( A = 60_08_51_47_51_43 ):
'''simple docstring'''
try:
lowercase__ = int(A )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
lowercase__ = 1
lowercase__ = 2
while i * i <= n:
while n % i == 0:
lowercase__ = i
n //= i
i += 1
if n > 1:
lowercase__ = n
return int(A )
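# Hand-checked case from the problem statement: 13195 = 5 * 7 * 13 * 29, so the loop
# above strips the factors 5, 7 and 13 before leaving 29 as the answer.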
if __name__ == "__main__":
print(F'{solution() = }')
| 668 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase_: List[str] = 1_6
lowerCAmelCase_: Optional[Any] = 3_2
def __a ( A , A = 16 , A = "bert-base-cased" ):
'''simple docstring'''
lowercase__ = AutoTokenizer.from_pretrained(A )
lowercase__ = load_dataset("glue" , "mrpc" )
def tokenize_function(A ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ = datasets.map(
A , batched=A , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=A )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(A ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A , padding="max_length" , max_length=1_28 , return_tensors="pt" )
return tokenizer.pad(A , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
lowercase__ = DataLoader(
tokenized_datasets["train"] , shuffle=A , collate_fn=A , batch_size=A )
lowercase__ = DataLoader(
tokenized_datasets["validation"] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
def __a ( A , A ):
'''simple docstring'''
lowercase__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ = config["lr"]
lowercase__ = int(config["num_epochs"] )
lowercase__ = int(config["seed"] )
lowercase__ = int(config["batch_size"] )
lowercase__ = args.model_name_or_path
set_seed(A )
lowercase__ , lowercase__ = get_dataloaders(A , A , A )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ = AutoModelForSequenceClassification.from_pretrained(A , return_dict=A )
# Instantiate optimizer
lowercase__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase__ = optimizer_cls(params=model.parameters() , lr=A )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
lowercase__ = 1
lowercase__ = (len(A ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase__ = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=0 , num_training_steps=A , )
else:
lowercase__ = DummyScheduler(A , total_num_steps=A , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
A , A , A , A , A )
# We need to keep track of how many total steps we have iterated over
lowercase__ = 0
# We also need to keep track of the stating epoch so files are named properly
lowercase__ = 0
# Now we train the model
lowercase__ = evaluate.load("glue" , "mrpc" )
lowercase__ = 0
lowercase__ = {}
for epoch in range(A , A ):
model.train()
for step, batch in enumerate(A ):
lowercase__ = model(**A )
lowercase__ = outputs.loss
lowercase__ = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
lowercase__ = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ = model(**A )
lowercase__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowercase__ , lowercase__ = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A ) - 1:
lowercase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowercase__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A , references=A , )
lowercase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , A )
lowercase__ = eval_metric["accuracy"]
if best_performance < eval_metric["accuracy"]:
lowercase__ = eval_metric["accuracy"]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f:
json.dump(A , A )
def __a ( ):
'''simple docstring'''
lowercase__ = argparse.ArgumentParser(description="Simple example of a training script enforcing a performance lower bound." )
parser.add_argument(
"--model_name_or_path" , type=A , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=A , )
parser.add_argument(
"--output_dir" , type=A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--performance_lower_bound" , type=A , default=A , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , )
parser.add_argument(
"--num_epochs" , type=A , default=3 , help="Number of train epochs." , )
lowercase__ = parser.parse_args()
lowercase__ = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(A , A )
if __name__ == "__main__":
main()
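# Launch sketch (script name is a placeholder; flags per the parser above, and any
# DeepSpeed/accelerate configuration lives outside this file):
#   accelerate launch this_script.py --model_name_or_path bert-base-cased \
#       --output_dir ./results --num_epochs 3 --performance_lower_bound 0.8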
| 668 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase_: Union[str, Any] = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Optional[Any] = [
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
lowerCAmelCase_: List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure)
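# Note on the pattern above: in the upstream lazy-import idiom this _LazyModule object
# is assigned to sys.modules[__name__], so `from <package> import MraModel` resolves
# lazily and only imports the torch-backed code on first attribute access; the
# TYPE_CHECKING branch keeps static analyzers aware of the same names.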
| 668 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a__ ( _a ):
snake_case_ = (IPNDMScheduler,)
snake_case_ = (("num_inference_steps", 50),)
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = {"num_train_timesteps": 1000}
config.update(**_UpperCAmelCase )
return config
def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config(**_UpperCAmelCase )
lowercase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
if time_step is None:
lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase )
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
if time_step is None:
lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**_UpperCAmelCase )
lowercase__ = scheduler_class(**_UpperCAmelCase )
lowercase__ = 10
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample
return sample
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(_UpperCAmelCase, "set_timesteps" ):
scheduler.set_timesteps(_UpperCAmelCase )
elif num_inference_steps is not None and not hasattr(_UpperCAmelCase, "set_timesteps" ):
lowercase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.timesteps[5]
lowercase__ = scheduler.timesteps[6]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def snake_case__ ( self ):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase, time_step=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ):
self.check_over_forward(num_inference_steps=_UpperCAmelCase, time_step=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.full_loop()
lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_mean.item() - 254_0529 ) < 10
| 668 | 1 |
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def __a ( A ):
'''simple docstring'''
return "".join(sorted(A ) )
def __a ( A ):
'''simple docstring'''
return word_by_signature[signature(A )]
lowerCAmelCase_: str = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
lowerCAmelCase_: Optional[int] = sorted({word.strip().lower() for word in data.splitlines()})
lowerCAmelCase_: Optional[Any] = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
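# Standalone check of the signature idea: sorting the letters maps every anagram of a
# word to the same dictionary key.
assert "".join(sorted("elvis")) == "".join(sorted("lives")) == "eilsv"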
if __name__ == "__main__":
lowerCAmelCase_: List[Any] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("anagrams.txt", "w") as file:
file.write("all_anagrams = \n ")
file.write(pprint.pformat(all_anagrams))
| 668 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( _a , unittest.TestCase ):
snake_case_ = MgpstrTokenizer
snake_case_ = False
snake_case_ = {}
snake_case_ = False
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
# fmt: off
lowercase__ = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
lowercase__ = dict(zip(_UpperCAmelCase, range(len(_UpperCAmelCase ) ) ) )
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + "\n" )
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = "tester"
lowercase__ = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
lowercase__ = tokenizer.encode([special_token], add_special_tokens=_UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ), 1 )
lowercase__ = tokenizer.decode(_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase )
self.assertTrue(special_token not in decoded )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ , lowercase__ = self.get_input_output_texts(_UpperCAmelCase )
lowercase__ = tokenizer.tokenize(_UpperCAmelCase )
lowercase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertNotEqual(len(_UpperCAmelCase ), 0 )
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
self.assertEqual(text_a.replace(" ", "" ), _UpperCAmelCase )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def snake_case__ ( self ):
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def snake_case__ ( self ):
'''simple docstring'''
pass
| 668 | 1 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
lowerCAmelCase_: List[str] = "src/transformers"
lowerCAmelCase_: Any = "docs/source/en/tasks"
def __a ( A , A , A ):
'''simple docstring'''
with open(A , "r" , encoding="utf-8" , newline="\n" ) as f:
lowercase__ = f.readlines()
# Find the start prompt.
lowercase__ = 0
while not lines[start_index].startswith(A ):
start_index += 1
start_index += 1
lowercase__ = start_index
while not lines[end_index].startswith(A ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase_: Dict = direct_transformers_import(TRANSFORMERS_PATH)
lowerCAmelCase_: Dict = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
lowerCAmelCase_: Union[str, Any] = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def __a ( A ):
'''simple docstring'''
lowercase__ = TASK_GUIDE_TO_MODELS[task_guide]
lowercase__ = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(A , set() )
lowercase__ = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([f'''[{name}](../model_doc/{code})''' for code, name in model_names.items()] ) + "\n"
def __a ( A , A=False ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = _find_text_in_file(
filename=os.path.join(A , A ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
lowercase__ = get_model_list_for_task(A )
if current_list != new_list:
if overwrite:
with open(os.path.join(A , A ) , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
f'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'''
" to fix this." )
if __name__ == "__main__":
lowerCAmelCase_: Tuple = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowerCAmelCase_: Dict = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
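# Invocation sketch, per the comment at the top of the file:
#   python utils/check_task_guides.py                      # report inconsistencies
#   python utils/check_task_guides.py --fix_and_overwrite  # rewrite the task guides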
| 668 |
"""simple docstring"""
from ...utils import OptionalDependencyNotAvailable, is_note_seq_available, is_torch_available, is_transformers_available
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 668 | 1 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_: List[str] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class a__ ( _a , unittest.TestCase ):
snake_case_ = XLMRobertaTokenizer
snake_case_ = XLMRobertaTokenizerFast
snake_case_ = True
snake_case_ = True
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = XLMRobertaTokenizer(_UpperCAmelCase, keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "<pad>"
lowercase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ), _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], "<s>" )
self.assertEqual(vocab_keys[1], "<pad>" )
self.assertEqual(vocab_keys[-1], "<mask>" )
self.assertEqual(len(_UpperCAmelCase ), 1002 )
def snake_case__ ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 1002 )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = XLMRobertaTokenizer(_UpperCAmelCase, keep_accents=_UpperCAmelCase )
lowercase__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCAmelCase, ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
lowercase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCAmelCase, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
], )
lowercase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
], )
lowercase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
], )
def snake_case__ ( self ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowercase__ = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase__ = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase )
lowercase__ = self.tokenizer_class.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase )
lowercase__ = tempfile.mkdtemp()
lowercase__ = tokenizer_r.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer_p.save_pretrained(_UpperCAmelCase )
# Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
lowercase__ = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(_UpperCAmelCase, _UpperCAmelCase )
# Checks everything loads correctly in the same way
lowercase__ = tokenizer_r.from_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer_p.from_pretrained(_UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCAmelCase, _UpperCAmelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_UpperCAmelCase )
# Save tokenizer rust, legacy_format=True
lowercase__ = tempfile.mkdtemp()
lowercase__ = tokenizer_r.save_pretrained(_UpperCAmelCase, legacy_format=_UpperCAmelCase )
lowercase__ = tokenizer_p.save_pretrained(_UpperCAmelCase )
# Checks it saves with the same files
self.assertSequenceEqual(_UpperCAmelCase, _UpperCAmelCase )
# Checks everything loads correctly in the same way
lowercase__ = tokenizer_r.from_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer_p.from_pretrained(_UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCAmelCase, _UpperCAmelCase ) )
shutil.rmtree(_UpperCAmelCase )
# Save tokenizer rust, legacy_format=False
lowercase__ = tempfile.mkdtemp()
lowercase__ = tokenizer_r.save_pretrained(_UpperCAmelCase, legacy_format=_UpperCAmelCase )
lowercase__ = tokenizer_p.save_pretrained(_UpperCAmelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowercase__ = tokenizer_r.from_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer_p.from_pretrained(_UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCAmelCase, _UpperCAmelCase ) )
shutil.rmtree(_UpperCAmelCase )
@cached_property
def snake_case__ ( self ):
'''simple docstring'''
return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base" )
def snake_case__ ( self ):
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(_UpperCAmelCase, f.name )
lowercase__ = XLMRobertaTokenizer(f.name, keep_accents=_UpperCAmelCase )
lowercase__ = pickle.dumps(_UpperCAmelCase )
pickle.loads(_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = "I was born in 92000, and this is falsé."
lowercase__ = tokenizer.tokenize(_UpperCAmelCase )
lowercase__ = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
lowercase__ = rust_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(_UpperCAmelCase )
lowercase__ = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
@slow
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "Hello World!"
lowercase__ = [0, 3_5378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_UpperCAmelCase, self.big_tokenizer.encode(_UpperCAmelCase ) )
@slow
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
lowercase__ = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
17_9459,
12_4850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
1_0114,
711,
152,
20,
6,
5,
2_2376,
642,
1221,
1_5190,
3_4153,
450,
5608,
959,
1119,
5_7702,
136,
186,
47,
1098,
2_9367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
5_0901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_UpperCAmelCase, self.big_tokenizer.encode(_UpperCAmelCase ) )
@slow
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = {"input_ids": [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase, model_name="xlm-roberta-base", revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3", )
| 668 |
"""simple docstring"""
from typing import Any
import numpy as np
def __a ( A ):
'''simple docstring'''
return np.array_equal(A , matrix.conjugate().T )
def __a ( A , A ):
'''simple docstring'''
lowercase__ = v.conjugate().T
lowercase__ = v_star.dot(A )
assert isinstance(A , np.ndarray )
return (v_star_dot.dot(A )) / (v_star.dot(A ))
def __a ( ):
'''simple docstring'''
lowercase__ = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
lowercase__ = np.array([[1], [2], [3]] )
assert is_hermitian(A ), f'''{a} is not hermitian.'''
print(rayleigh_quotient(A , A ) )
lowercase__ = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(A ), f'''{a} is not hermitian.'''
assert rayleigh_quotient(A , A ) == float(3 )
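# Hand check of the second assertion above: with a = [[1, 2, 4], [2, 3, -1], [4, -1, 1]]
# and v = (1, 2, 3)^T, a @ v = (17, 5, 5), so v.(a v) = 42, v.v = 14 and the
# Rayleigh quotient is 42 / 14 = 3.
_a2 = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
_v2 = np.array([1, 2, 3])
assert (_v2 @ _a2 @ _v2) / (_v2 @ _v2) == 3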
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 668 | 1 |
"""simple docstring"""
from __future__ import annotations
def __a ( A ):
'''simple docstring'''
return len(set(A ) ) == len(A )
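# Example of the idea: {1, 2, 2} collapses to two elements, so a list with duplicates
# fails the length test, while ["a", "b", "c"] keeps all three and passes.
assert len(set([1, 2, 2])) != 3 and len(set(["a", "b", "c"])) == 3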
if __name__ == "__main__":
import doctest
doctest.testmod()
| 668 |
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class a__ ( _a , unittest.TestCase ):
snake_case_ = PriorTransformer
snake_case_ = "hidden_states"
@property
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = 4
lowercase__ = 8
lowercase__ = 7
lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def snake_case__ ( self, _UpperCAmelCase=0 ):
'''simple docstring'''
torch.manual_seed(_UpperCAmelCase )
lowercase__ = 4
lowercase__ = 8
lowercase__ = 7
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def snake_case__ ( self ):
'''simple docstring'''
return (4, 8)
@property
def snake_case__ ( self ):
'''simple docstring'''
return (4, 8)
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = {
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_layers": 2,
"embedding_dim": 8,
"num_embeddings": 7,
"additional_embeddings": 4,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = PriorTransformer.from_pretrained(
"hf-internal-testing/prior-dummy", output_loading_info=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertEqual(len(loading_info["missing_keys"] ), 0 )
model.to(_UpperCAmelCase )
lowercase__ = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.prepare_init_args_and_inputs_for_common()
lowercase__ = self.model_class(**_UpperCAmelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["hidden_states", "timestep"]
self.assertListEqual(arg_names[:2], _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" )
lowercase__ = model.to(_UpperCAmelCase )
if hasattr(_UpperCAmelCase, "set_default_attn_processor" ):
model.set_default_attn_processor()
lowercase__ = self.get_dummy_seed_input()
with torch.no_grad():
lowercase__ = model(**_UpperCAmelCase )[0]
lowercase__ = output[0, :5].flatten().cpu()
print(_UpperCAmelCase )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
lowercase__ = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] )
self.assertTrue(torch_all_close(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-2 ) )
@slow
class a__ ( unittest.TestCase ):
def snake_case__ ( self, _UpperCAmelCase=1, _UpperCAmelCase=768, _UpperCAmelCase=77, _UpperCAmelCase=0 ):
'''simple docstring'''
torch.manual_seed(_UpperCAmelCase )
lowercase__ = batch_size
lowercase__ = embedding_dim
lowercase__ = num_embeddings
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def snake_case__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]],
[37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]],
# fmt: on
] )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior" )
model.to(_UpperCAmelCase )
lowercase__ = self.get_dummy_seed_input(seed=_UpperCAmelCase )
with torch.no_grad():
lowercase__ = model(**_UpperCAmelCase )[0]
assert list(sample.shape ) == [1, 768]
lowercase__ = sample[0, :8].flatten().cpu()
print(_UpperCAmelCase )
lowercase__ = torch.tensor(_UpperCAmelCase )
assert torch_all_close(_UpperCAmelCase, _UpperCAmelCase, atol=1E-3 )
| 668 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: Tuple = logging.get_logger(__name__)
lowerCAmelCase_: str = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class a__ ( _a ):
snake_case_ = "transfo-xl"
snake_case_ = ["mems"]
snake_case_ = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self, _UpperCAmelCase=26_7735, _UpperCAmelCase=[2_0000, 4_0000, 20_0000], _UpperCAmelCase=1024, _UpperCAmelCase=1024, _UpperCAmelCase=16, _UpperCAmelCase=64, _UpperCAmelCase=4096, _UpperCAmelCase=4, _UpperCAmelCase=False, _UpperCAmelCase=18, _UpperCAmelCase=1600, _UpperCAmelCase=1000, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase=0, _UpperCAmelCase=-1, _UpperCAmelCase=True, _UpperCAmelCase=0.1, _UpperCAmelCase=0.0, _UpperCAmelCase=True, _UpperCAmelCase="normal", _UpperCAmelCase=0.01, _UpperCAmelCase=0.01, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-5, _UpperCAmelCase=0, **_UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = vocab_size
lowercase__ = []
self.cutoffs.extend(_UpperCAmelCase )
if proj_share_all_but_first:
lowercase__ = [False] + [True] * len(self.cutoffs )
else:
lowercase__ = [False] + [False] * len(self.cutoffs )
lowercase__ = d_model
lowercase__ = d_embed
lowercase__ = d_head
lowercase__ = d_inner
lowercase__ = div_val
lowercase__ = pre_lnorm
lowercase__ = n_layer
lowercase__ = n_head
lowercase__ = mem_len
lowercase__ = same_length
lowercase__ = attn_type
lowercase__ = clamp_len
lowercase__ = sample_softmax
lowercase__ = adaptive
lowercase__ = dropout
lowercase__ = dropatt
lowercase__ = untie_r
lowercase__ = init
lowercase__ = init_range
lowercase__ = proj_init_std
lowercase__ = init_std
lowercase__ = layer_norm_epsilon
super().__init__(eos_token_id=_UpperCAmelCase, **_UpperCAmelCase )
@property
def snake_case__ ( self ):
'''simple docstring'''
logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
raise NotImplementedError(
F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 668 |
"""simple docstring"""
lowerCAmelCase_: Any = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def __a ( A ):
'''simple docstring'''
if not isinstance(A , A ):
lowercase__ = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(A )
lowercase__ = "".join(bin(A )[2:].zfill(8 ) for byte in data )
lowercase__ = len(A ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ = b"=" * ((6 - len(A ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(A ) % 6)
else:
lowercase__ = b""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(A ) , 6 ) ).encode()
+ padding
)
def __a ( A ):
'''simple docstring'''
if not isinstance(A , A ) and not isinstance(A , A ):
lowercase__ = (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(A )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(A , A ):
try:
lowercase__ = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ = encoded_data[:-padding]
lowercase__ = "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ = "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(A ) , 8 )
]
return bytes(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
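# A round-trip sanity check for the two routines above. Obfuscation collapsed both
# function names to `__a`, so this sketch cross-checks against Python's stdlib
# `base64` module instead of calling them directly; with the original names
# restored, the hand-rolled encoder and decoder should agree with the stdlib.
import base64
payload = b"Hello, World!"
encoded = base64.b64encode(payload) # b'SGVsbG8sIFdvcmxkIQ=='
assert base64.b64decode(encoded) == payload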
| 668 | 1 |
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
lowerCAmelCase_: List[str] = "true"
def __a ( A , A=82 , A=16 ):
'''simple docstring'''
set_seed(42 )
lowercase__ = RegressionModel()
lowercase__ = deepcopy(A )
lowercase__ = RegressionDataset(length=A )
lowercase__ = DataLoader(A , batch_size=A )
model.to(accelerator.device )
lowercase__ , lowercase__ = accelerator.prepare(A , A )
return model, ddp_model, dataloader
def __a ( A , A=False ):
'''simple docstring'''
lowercase__ = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" )
lowercase__ = load_dataset("glue" , "mrpc" , split="validation" )
def tokenize_function(A ):
lowercase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=A , max_length=A )
return outputs
with accelerator.main_process_first():
lowercase__ = dataset.map(
A , batched=A , remove_columns=["idx", "sentence1", "sentence2"] , )
lowercase__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(A ):
if use_longest:
return tokenizer.pad(A , padding="longest" , return_tensors="pt" )
return tokenizer.pad(A , padding="max_length" , max_length=1_28 , return_tensors="pt" )
return DataLoader(A , shuffle=A , collate_fn=A , batch_size=16 )
def __a ( A , A ):
'''simple docstring'''
lowercase__ = Accelerator(dispatch_batches=A , split_batches=A )
lowercase__ = get_dataloader(A , not dispatch_batches )
lowercase__ = AutoModelForSequenceClassification.from_pretrained(
"hf-internal-testing/mrpc-bert-base-cased" , return_dict=A )
lowercase__ , lowercase__ = accelerator.prepare(A , A )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __a ( A , A , A ):
'''simple docstring'''
lowercase__ = []
for batch in dataloader:
lowercase__ , lowercase__ = batch.values()
with torch.no_grad():
lowercase__ = model(A )
lowercase__ , lowercase__ = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
lowercase__ , lowercase__ = [], []
for logit, targ in logits_and_targets:
logits.append(A )
targs.append(A )
lowercase__ , lowercase__ = torch.cat(A ), torch.cat(A )
return logits, targs
def __a ( A , A=82 , A=False , A=False , A=16 ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ = get_basic_setup(A , A , A )
lowercase__ , lowercase__ = generate_predictions(A , A , A )
assert (
len(A ) == num_samples
), f'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(A )}'''
def __a ( A = False , A = False ):
'''simple docstring'''
lowercase__ = evaluate.load("glue" , "mrpc" )
lowercase__ , lowercase__ = get_mrpc_setup(A , A )
# First do baseline
lowercase__ , lowercase__ , lowercase__ = setup["no"]
model.to(A )
model.eval()
for batch in dataloader:
batch.to(A )
with torch.inference_mode():
lowercase__ = model(**A )
lowercase__ = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=A , references=batch["labels"] )
lowercase__ = metric.compute()
# Then do distributed
lowercase__ , lowercase__ , lowercase__ = setup["ddp"]
model.eval()
for batch in dataloader:
with torch.inference_mode():
lowercase__ = model(**A )
lowercase__ = outputs.logits.argmax(dim=-1 )
lowercase__ = batch["labels"]
lowercase__ , lowercase__ = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=A , references=A )
lowercase__ = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def __a ( ):
'''simple docstring'''
lowercase__ = Accelerator(split_batches=A , dispatch_batches=A )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    # These are a bit slower, so they should only be run on GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("**Testing gather_for_metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(A , A )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test torch metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
lowercase__ = Accelerator(split_batches=A , dispatch_batches=A )
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(A , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test last batch is not dropped when perfectly divisible**" )
lowercase__ = Accelerator()
test_torch_metrics(A , 5_12 )
accelerator.state._reset_state()
def __a ( A ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 668 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __a ( A , A , A = "x" , A = 10**-10 , A = 1 , ):
'''simple docstring'''
lowercase__ = symbols(A )
lowercase__ = lambdify(A , A )
lowercase__ = lambdify(A , diff(A , A ) )
lowercase__ = starting_point
while True:
if diff_function(A ) != 0:
lowercase__ = prev_guess - multiplicity * func(A ) / diff_function(
A )
else:
raise ZeroDivisionError("Could not find root" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
lowercase__ = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
    # Find a fourth root of 5 (the complex starting point converges to a complex root)
print(F'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}')
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
F'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
F'{newton_raphson("exp(x) - 1", 1_0, precision=0.005)}',
)
# Find root of cos(x)
print(F'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
| 668 | 1 |
"""simple docstring"""
def __a ( A ):
'''simple docstring'''
stooge(A , 0 , len(A ) - 1 )
return arr
def __a ( A , A , A ):
'''simple docstring'''
if i >= h:
return
    # If the first element is larger than the last, swap them
if arr[i] > arr[h]:
lowercase__ , lowercase__ = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
        lowercase__ = (h - i + 1) // 3
# Recursively sort first 2/3 elements
stooge(A , A , (h - t) )
# Recursively sort last 2/3 elements
stooge(A , i + t , (A) )
# Recursively sort first 2/3 elements
stooge(A , A , (h - t) )
if __name__ == "__main__":
lowerCAmelCase_: int = input("Enter numbers separated by a comma:\n").strip()
lowerCAmelCase_: Union[str, Any] = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
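# A minimal correctness sketch, assuming the de-obfuscated entry point is
# `stooge_sort` (the name the demo above already calls). Stooge sort is of
# theoretical interest only, running in O(n^(log 3 / log 1.5)), roughly O(n^2.71),
# but its output should always match the built-in sort.
import random
data = random.sample(range(100), 10)
assert stooge_sort(list(data)) == sorted(data)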
| 668 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_: Union[str, Any] = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Union[str, Any] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Any = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Tuple = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Optional[Any] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase_: Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
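# A small sketch of what `_LazyModule` buys here: importing the package is cheap
# because the framework-specific submodules load only on first attribute access.
# This mirrors stock `transformers` behavior and assumes the library is installed.
import transformers.models.distilbert as distilbert_module
config_cls = distilbert_module.DistilBertConfig # attribute access triggers the deferred import
print(config_cls.model_type) # "distilbert"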
| 668 | 1 |
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
lowerCAmelCase_: int = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
lowerCAmelCase_ , lowerCAmelCase_: str = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
lowerCAmelCase_: List[str] = rh.cluster(
name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
)
else:
lowerCAmelCase_: List[str] = rh.cluster(
name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
lowerCAmelCase_: Optional[int] = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F'pip install -r transformers/examples/{example_dir}/requirements.txt'])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F'python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 668 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase_: Union[str, Any] = logging.get_logger(__name__)
class a__ ( _a ):
snake_case_ = ["audio_values", "audio_mask"]
def __init__( self, _UpperCAmelCase=2048, _UpperCAmelCase=1, _UpperCAmelCase=[16, 16], _UpperCAmelCase=128, _UpperCAmelCase=4_4100, _UpperCAmelCase=86, _UpperCAmelCase=2048, _UpperCAmelCase=0.0, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
feature_size=_UpperCAmelCase, sampling_rate=_UpperCAmelCase, padding_value=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = spectrogram_length
lowercase__ = num_channels
lowercase__ = patch_size
lowercase__ = feature_size // self.patch_size[1]
lowercase__ = n_fft
lowercase__ = sampling_rate // hop_length_to_sampling_rate
lowercase__ = sampling_rate
lowercase__ = padding_value
lowercase__ = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2, num_mel_filters=_UpperCAmelCase, min_frequency=0.0, max_frequency=22_050.0, sampling_rate=_UpperCAmelCase, norm="slaney", mel_scale="slaney", ).T
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = spectrogram(
_UpperCAmelCase, window_function(self.n_fft, "hann" ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0, )
lowercase__ = log_spec[:, :-1]
lowercase__ = log_spec - 20.0
lowercase__ = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0
return log_spec
def __call__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = True, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, **_UpperCAmelCase, ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
F''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowercase__ = isinstance(_UpperCAmelCase, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
lowercase__ = is_batched_numpy or (
isinstance(_UpperCAmelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_UpperCAmelCase, np.ndarray ):
lowercase__ = np.asarray(_UpperCAmelCase, dtype=np.floataa )
elif isinstance(_UpperCAmelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowercase__ = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0], _UpperCAmelCase ):
lowercase__ = [np.asarray(_UpperCAmelCase, dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowercase__ = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowercase__ = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowercase__ = np.array(_UpperCAmelCase ).astype(np.floataa )
# convert into correct format for padding
lowercase__ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowercase__ = np.ones([len(_UpperCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
lowercase__ = padded_audio_features * self.padding_value
for i in range(len(_UpperCAmelCase ) ):
lowercase__ = audio_features[i]
lowercase__ = feature
# return as BatchFeature
if return_attention_mask:
lowercase__ = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
lowercase__ = {"audio_values": padded_audio_features}
lowercase__ = BatchFeature(data=_UpperCAmelCase, tensor_type=_UpperCAmelCase )
return encoded_inputs
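# A hedged usage sketch for the extractor above. `a__`/`snake_case__` are
# obfuscation placeholders; the class is assumed to correspond to
# `TvltFeatureExtractor` in `transformers`, and the time dimension of the output
# depends on clip length and patch size.
import numpy as np
from transformers import TvltFeatureExtractor # assumed de-obfuscated class
extractor = TvltFeatureExtractor()
waveform = np.random.randn(44_100).astype(np.float32) # one second at 44.1 kHz
features = extractor(waveform, sampling_rate=44_100, return_tensors="np")
print(features["audio_values"].shape) # (1, 1, time, 128) with the default config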
| 668 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_: Union[str, Any] = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Union[str, Any] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Any = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Tuple = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Optional[Any] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase_: Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 668 |
"""simple docstring"""
from __future__ import annotations
import math
def __a ( A ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
lowerCAmelCase_: Optional[Any] = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]
def __a ( A ):
'''simple docstring'''
if not isinstance(A , A ):
raise ValueError("n must be an integer" )
if n <= 0:
raise ValueError("n must be >= 0" )
lowercase__ = []
for num in range(len(A ) ):
lowercase__ = 0
while 2 * i * i <= odd_composites[num]:
lowercase__ = odd_composites[num] - 2 * i * i
if is_prime(A ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(A ) == n:
return list_nums
return []
def __a ( ):
'''simple docstring'''
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'{solution() = }')
| 668 | 1 |
"""simple docstring"""
def __a ( A = 1_00_00_00 ):
'''simple docstring'''
lowercase__ = limit + 1
lowercase__ = [0] * limit
for first_term in range(1 , A ):
for n in range(A , A , A ):
lowercase__ = first_term + n / first_term
            if common_difference % 4: # a + n/a must be divisible by 4 for d to be an integer
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                frequency[n] += 1 # so z > 0 (a > d) and n > 0 (a < 4d)
lowercase__ = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(F'{solution() = }')
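# The counting trick above comes from writing the progression as x = a + d,
# y = a, z = a - d, so n = x**2 - y**2 - z**2 = a * (4*d - a). Hence
# 4*d = a + n/a must be an integer divisible by 4, and z > 0, n > 0 force
# d < a < 4*d. A brute-force cross-check on n = 1155, the least value with
# exactly ten solutions according to the Project Euler 135 statement:
def brute_count(n: int) -> int:
    count = 0
    for a in range(1, n + 1):
        for d in range(1, a): # z = a - d must stay positive
            if (a + d) ** 2 - a**2 - (a - d) ** 2 == n:
                count += 1
    return count
assert brute_count(1155) == 10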
| 668 |
"""simple docstring"""
import os
import sys
lowerCAmelCase_: Any = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowerCAmelCase_: Union[str, Any] = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoConfig.from_pretrained(*A , **A )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(*A , **A )
@add_start_docstrings(AutoModel.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModel.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*A , **A )
| 668 | 1 |
"""simple docstring"""
from __future__ import annotations
def __a ( A , A ):
'''simple docstring'''
if partitions <= 0:
raise ValueError("partitions must be a positive number!" )
if partitions > number_of_bytes:
raise ValueError("partitions can not > number_of_bytes!" )
lowercase__ = number_of_bytes // partitions
lowercase__ = []
for i in range(A ):
lowercase__ = i * bytes_per_partition + 1
lowercase__ = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
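# A short usage sketch, with `allocation_num` as the assumed de-obfuscated name
# of `__a` above. The inclusive, 1-based ranges map directly onto HTTP `Range`
# headers for a segmented download:
ranges = allocation_num(100, 4)
assert ranges == ["1-25", "26-50", "51-75", "76-100"]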
| 668 |
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class a__ ( unittest.TestCase ):
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(_UpperCAmelCase ):
lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(_UpperCAmelCase ):
lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase )
lowercase__ = FlaxBertModel.from_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX )
@jax.jit
def eval(**_UpperCAmelCase ):
return model(**_UpperCAmelCase )
eval(**_UpperCAmelCase ).block_until_ready()
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase )
lowercase__ = FlaxRobertaModel.from_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX )
@jax.jit
def eval(**_UpperCAmelCase ):
return model(**_UpperCAmelCase )
eval(**_UpperCAmelCase ).block_until_ready()
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase, "bert-base is not a local folder and is not a valid model identifier" ):
lowercase__ = FlaxAutoModel.from_pretrained("bert-base" )
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase, revision="aaaaaa" )
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase, "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack", ):
lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase, "Use `from_pt=True` to load this model" ):
lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
| 668 | 1 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: Optional[int] = logging.get_logger(__name__)
lowerCAmelCase_: List[Any] = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class a__ ( _a ):
snake_case_ = "xlnet"
snake_case_ = ["mems"]
snake_case_ = {
"n_token": "vocab_size", # Backward compatibility
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self, _UpperCAmelCase=3_2000, _UpperCAmelCase=1024, _UpperCAmelCase=24, _UpperCAmelCase=16, _UpperCAmelCase=4096, _UpperCAmelCase="gelu", _UpperCAmelCase=True, _UpperCAmelCase="bi", _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=None, _UpperCAmelCase=True, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=-1, _UpperCAmelCase=False, _UpperCAmelCase="last", _UpperCAmelCase=True, _UpperCAmelCase="tanh", _UpperCAmelCase=0.1, _UpperCAmelCase=5, _UpperCAmelCase=5, _UpperCAmelCase=5, _UpperCAmelCase=1, _UpperCAmelCase=2, **_UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = n_layer
lowercase__ = n_head
if d_model % n_head != 0:
raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''' )
lowercase__ = d_model // n_head
lowercase__ = ff_activation
lowercase__ = d_inner
lowercase__ = untie_r
lowercase__ = attn_type
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = dropout
lowercase__ = mem_len
lowercase__ = reuse_len
lowercase__ = bi_data
lowercase__ = clamp_len
lowercase__ = same_length
lowercase__ = summary_type
lowercase__ = summary_use_proj
lowercase__ = summary_activation
lowercase__ = summary_last_dropout
lowercase__ = start_n_top
lowercase__ = end_n_top
lowercase__ = bos_token_id
lowercase__ = pad_token_id
lowercase__ = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
" instead.", _UpperCAmelCase, )
lowercase__ = kwargs["use_cache"]
lowercase__ = use_mems_eval
lowercase__ = use_mems_train
super().__init__(pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, **_UpperCAmelCase )
@property
def snake_case__ ( self ):
'''simple docstring'''
logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
raise NotImplementedError(
F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
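# A quick sketch of the `attribute_map` aliasing declared above, using the real
# `XLNetConfig` from `transformers` (assumed installed): generic names such as
# `hidden_size` resolve transparently to the model-specific fields.
from transformers import XLNetConfig
config = XLNetConfig() # defaults match the signature above: d_model=1024, n_layer=24
assert config.hidden_size == config.d_model == 1024
assert config.num_hidden_layers == config.n_layer == 24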
| 668 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_: str = logging.get_logger(__name__)
lowerCAmelCase_: List[Any] = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class a__ ( _a ):
snake_case_ = "data2vec-vision"
def __init__( self, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.0, _UpperCAmelCase=0.0, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=224, _UpperCAmelCase=16, _UpperCAmelCase=3, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=True, _UpperCAmelCase=[3, 5, 7, 11], _UpperCAmelCase=[1, 2, 3, 6], _UpperCAmelCase=True, _UpperCAmelCase=0.4, _UpperCAmelCase=256, _UpperCAmelCase=1, _UpperCAmelCase=False, _UpperCAmelCase=255, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = use_mask_token
lowercase__ = use_absolute_position_embeddings
lowercase__ = use_relative_position_bias
lowercase__ = use_shared_relative_position_bias
lowercase__ = layer_scale_init_value
lowercase__ = drop_path_rate
lowercase__ = use_mean_pooling
# decode head attributes (semantic segmentation)
lowercase__ = out_indices
lowercase__ = pool_scales
# auxiliary head attributes (semantic segmentation)
lowercase__ = use_auxiliary_head
lowercase__ = auxiliary_loss_weight
lowercase__ = auxiliary_channels
lowercase__ = auxiliary_num_convs
lowercase__ = auxiliary_concat_input
lowercase__ = semantic_loss_ignore_index
class a__ ( _a ):
snake_case_ = version.parse("1.11" )
@property
def snake_case__ ( self ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def snake_case__ ( self ):
'''simple docstring'''
return 1E-4
| 668 | 1 |
"""simple docstring"""
import requests
lowerCAmelCase_: Optional[int] = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def __a ( A ):
'''simple docstring'''
lowercase__ = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page["articles"] , 1 ):
print(f'''{i}.) {article['title']}''' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 668 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: List[Any] = logging.get_logger(__name__)
lowerCAmelCase_: int = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class a__ ( _a ):
snake_case_ = "markuplm"
def __init__( self, _UpperCAmelCase=3_0522, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=2, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=0, _UpperCAmelCase=0, _UpperCAmelCase=2, _UpperCAmelCase=256, _UpperCAmelCase=1024, _UpperCAmelCase=216, _UpperCAmelCase=1001, _UpperCAmelCase=32, _UpperCAmelCase=50, _UpperCAmelCase="absolute", _UpperCAmelCase=True, _UpperCAmelCase=None, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = use_cache
lowercase__ = classifier_dropout
# additional properties
lowercase__ = max_depth
lowercase__ = max_xpath_tag_unit_embeddings
lowercase__ = max_xpath_subs_unit_embeddings
lowercase__ = tag_pad_id
lowercase__ = subs_pad_id
lowercase__ = xpath_unit_hidden_size
| 668 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: int = logging.get_logger(__name__)
lowerCAmelCase_: Optional[Any] = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class a__ ( _a ):
snake_case_ = "fnet"
def __init__( self, _UpperCAmelCase=3_2000, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu_new", _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=4, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=False, _UpperCAmelCase=512, _UpperCAmelCase=3, _UpperCAmelCase=1, _UpperCAmelCase=2, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, **_UpperCAmelCase )
lowercase__ = vocab_size
lowercase__ = max_position_embeddings
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = initializer_range
lowercase__ = type_vocab_size
lowercase__ = layer_norm_eps
lowercase__ = use_tpu_fourier_optimizations
lowercase__ = tpu_short_seq_length
| 668 |
"""simple docstring"""
lowerCAmelCase_: Union[str, Any] = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
lowerCAmelCase_: List[str] = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
lowerCAmelCase_: List[str] = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
lowerCAmelCase_: Dict = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
lowerCAmelCase_: Optional[int] = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
lowerCAmelCase_: Tuple = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
lowerCAmelCase_: str = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
lowerCAmelCase_: int = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 668 | 1 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that, temporarily, `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 668 |
"""simple docstring"""
from __future__ import annotations
def __a ( A , A ):
'''simple docstring'''
if partitions <= 0:
raise ValueError("partitions must be a positive number!" )
if partitions > number_of_bytes:
raise ValueError("partitions can not > number_of_bytes!" )
lowercase__ = number_of_bytes // partitions
lowercase__ = []
for i in range(A ):
lowercase__ = i * bytes_per_partition + 1
lowercase__ = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 668 | 1 |
"""simple docstring"""
def __a ( A , A ):
'''simple docstring'''
while b:
lowercase__ , lowercase__ = b, a % b
return a
def __a ( A , A ):
'''simple docstring'''
return a if b == 0 else euclidean_gcd_recursive(A , a % b )
def __a ( ):
'''simple docstring'''
print(f'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(f'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(f'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(f'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(f'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(f'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(f'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(f'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(f'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(f'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
| 668 |
"""simple docstring"""
from collections import deque
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = process_name # process name
lowercase__ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
lowercase__ = arrival_time
lowercase__ = burst_time # remaining burst time
lowercase__ = 0 # total time of the process wait in ready queue
lowercase__ = 0 # time from arrival time to completion time
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = number_of_queues
        # time slices of the queues that the round-robin algorithm is applied with
lowercase__ = time_slices
# unfinished process is in this ready_queue
lowercase__ = queue
# current time
lowercase__ = current_time
# finished process is in this sequence queue
lowercase__ = deque()
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
return [q.burst_time for q in queue]
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = deque() # sequence deque of finished process
while len(_UpperCAmelCase ) != 0:
lowercase__ = ready_queue.popleft() # current process
            # if the process arrives after the current time, idle until it arrives
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_UpperCAmelCase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
lowercase__ = 0
# set the process's turnaround time because it is finished
lowercase__ = self.current_time - cp.arrival_time
# set the completion time
lowercase__ = self.current_time
# add the process to queue that has finished queue
finished.append(_UpperCAmelCase )
self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(_UpperCAmelCase ) ):
lowercase__ = ready_queue.popleft() # current process
            # if the process arrives after the current time, idle until it arrives
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_UpperCAmelCase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
lowercase__ = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(_UpperCAmelCase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
lowercase__ = 0
# set the finish time
lowercase__ = self.current_time
# update the process' turnaround time because it is finished
lowercase__ = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_UpperCAmelCase )
self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def snake_case__ ( self ):
'''simple docstring'''
for i in range(self.number_of_queues - 1 ):
lowercase__ , lowercase__ = self.round_robin(
self.ready_queue, self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
lowerCAmelCase_: Optional[int] = Process("P1", 0, 5_3)
lowerCAmelCase_: Union[str, Any] = Process("P2", 0, 1_7)
lowerCAmelCase_: str = Process("P3", 0, 6_8)
lowerCAmelCase_: int = Process("P4", 0, 2_4)
lowerCAmelCase_: Dict = 3
lowerCAmelCase_: Any = [1_7, 2_5]
lowerCAmelCase_: Tuple = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
lowerCAmelCase_: Any = Process("P1", 0, 5_3)
lowerCAmelCase_: Tuple = Process("P2", 0, 1_7)
lowerCAmelCase_: Optional[int] = Process("P3", 0, 6_8)
lowerCAmelCase_: List[Any] = Process("P4", 0, 2_4)
lowerCAmelCase_: Union[str, Any] = 3
lowerCAmelCase_: Any = [1_7, 2_5]
lowerCAmelCase_: Optional[Any] = deque([Pa, Pa, Pa, Pa])
lowerCAmelCase_: Union[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0)
lowerCAmelCase_: Tuple = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print sequence of finished processes
print(
F'sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'
)
| 668 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase_: Optional[int] = {
"configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
"tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Tuple = [
"TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"AdaptiveEmbedding",
"TransfoXLForSequenceClassification",
"TransfoXLLMHeadModel",
"TransfoXLModel",
"TransfoXLPreTrainedModel",
"load_tf_weights_in_transfo_xl",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
"TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAdaptiveEmbedding",
"TFTransfoXLForSequenceClassification",
"TFTransfoXLLMHeadModel",
"TFTransfoXLMainLayer",
"TFTransfoXLModel",
"TFTransfoXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
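# Illustrative note (an assumption about the standard _LazyModule behaviour, not
# stated in this file): registering the lazy module in sys.modules defers the heavy
# torch/tf imports until an attribute is first accessed, e.g.:
#
#     from transformers.models.transfo_xl import TransfoXLConfig  # real import happens here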
| 668 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowerCAmelCase_: Dict = "pt"
elif is_tf_available():
lowerCAmelCase_: Dict = "tf"
else:
lowerCAmelCase_: str = "jax"
class a__ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
        tokenizer = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def snake_case__ ( self ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained("google/byt5-small" )
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase=False, _UpperCAmelCase=20, _UpperCAmelCase=5 ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
try:
lowercase__ = tokenizer.decode([i], clean_up_tokenization_spaces=_UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowercase__ = list(filter(lambda _UpperCAmelCase : re.match(R"^[ a-zA-Z]+$", t[1] ), _UpperCAmelCase ) )
lowercase__ = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1], add_special_tokens=_UpperCAmelCase ), _UpperCAmelCase ) )
if max_length is not None and len(_UpperCAmelCase ) > max_length:
lowercase__ = toks[:max_length]
if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0:
while len(_UpperCAmelCase ) < min_length:
lowercase__ = toks + toks
# toks_str = [t[1] for t in toks]
lowercase__ = [t[0] for t in toks]
# Ensure consistency
lowercase__ = tokenizer.decode(_UpperCAmelCase, clean_up_tokenization_spaces=_UpperCAmelCase )
if " " not in output_txt and len(_UpperCAmelCase ) > 1:
lowercase__ = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=_UpperCAmelCase )
+ " "
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=_UpperCAmelCase )
)
if with_prefix_space:
lowercase__ = " " + output_txt
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
return output_txt, output_ids
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"] )
lowercase__ = tokenizer(["hi", "I went to the gym", ""] )
self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = "Unicode €."
lowercase__ = tokenizer(_UpperCAmelCase )
lowercase__ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
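        # ByT5 is byte-level: each id is the UTF-8 byte value shifted by the 3
        # leading special tokens (pad=0, eos=1, unk=2), e.g. "U" = byte 85 -> id 88,
        # and the three bytes of "€" (0xE2 0x82 0xAC) -> 229, 133, 175 above.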
self.assertEqual(encoded["input_ids"], _UpperCAmelCase )
# decoding
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, "Unicode €.</s>" )
lowercase__ = tokenizer("e è é ê ë" )
lowercase__ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["input_ids"], _UpperCAmelCase )
# decoding
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, "e è é ê ë</s>" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ), "e è é ê ë</s>" )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
if FRAMEWORK != "jax":
lowercase__ = list(batch.input_ids.numpy()[0] )
else:
lowercase__ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertEqual((2, 37), batch.input_ids.shape )
self.assertEqual((2, 37), batch.attention_mask.shape )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids", _UpperCAmelCase )
self.assertIn("attention_mask", _UpperCAmelCase )
self.assertNotIn("decoder_input_ids", _UpperCAmelCase )
self.assertNotIn("decoder_attention_mask", _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = [
"Summary of the text.",
"Another summary.",
]
lowercase__ = tokenizer(
text_target=_UpperCAmelCase, max_length=32, padding="max_length", truncation=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
self.assertEqual(32, targets["input_ids"].shape[1] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization. </s>"]
lowercase__ = ["Summary of the text. </s>"]
# fmt: off
lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
lowercase__ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
lowercase__ = tokenizer(_UpperCAmelCase, text_target=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, batch["input_ids"][0] )
self.assertEqual(_UpperCAmelCase, batch["labels"][0] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
lowercase__ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
lowercase__ = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(_UpperCAmelCase )
lowercase__ = [F'''<extra_id_{i}>''' for i in range(125 )]
lowercase__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
lowercase__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowercase__ = tokenizer_class.from_pretrained(
_UpperCAmelCase, )
self.assertIn(
"an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowercase__ = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=_UpperCAmelCase )]
lowercase__ = tokenizer_class.from_pretrained(
_UpperCAmelCase, additional_special_tokens=_UpperCAmelCase, )
self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ), )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer_class.from_pretrained(_UpperCAmelCase )
self.assertTrue(tokenizer.decode([255] ) == "" )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(fast=_UpperCAmelCase, do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
lowercase__ = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
lowercase__ = 0
lowercase__ = tokenizer.convert_ids_to_tokens(
_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase )
for attr in attributes_list:
setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase )
setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase )
setattr(_UpperCAmelCase, "additional_special_tokens_ids", [] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [] )
setattr(_UpperCAmelCase, "additional_special_tokens_ids", [token_id_to_test_setters] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [token_to_test_setters] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [token_id_to_test_setters] )
| 668 | 1 |
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: str = logging.get_logger(__name__)
lowerCAmelCase_: Tuple = {
"facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class a__ ( PretrainedConfig ):
snake_case_ = "data2vec-audio"
def __init__( self, _UpperCAmelCase=32, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=0.0, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-5, _UpperCAmelCase="gelu", _UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512), _UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2), _UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2), _UpperCAmelCase=False, _UpperCAmelCase=16, _UpperCAmelCase=19, _UpperCAmelCase=5, _UpperCAmelCase=0.05, _UpperCAmelCase=10, _UpperCAmelCase=2, _UpperCAmelCase=0.0, _UpperCAmelCase=10, _UpperCAmelCase=0, _UpperCAmelCase="sum", _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=256, _UpperCAmelCase=(512, 512, 512, 512, 1500), _UpperCAmelCase=(5, 3, 3, 1, 1), _UpperCAmelCase=(1, 2, 3, 1, 1), _UpperCAmelCase=512, _UpperCAmelCase=0, _UpperCAmelCase=1, _UpperCAmelCase=2, _UpperCAmelCase=False, _UpperCAmelCase=3, _UpperCAmelCase=2, _UpperCAmelCase=3, _UpperCAmelCase=None, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase, pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase )
lowercase__ = hidden_size
lowercase__ = feat_extract_activation
lowercase__ = list(_UpperCAmelCase )
lowercase__ = list(_UpperCAmelCase )
lowercase__ = list(_UpperCAmelCase )
lowercase__ = conv_bias
lowercase__ = num_conv_pos_embeddings
lowercase__ = num_conv_pos_embedding_groups
lowercase__ = conv_pos_kernel_size
lowercase__ = len(self.conv_dim )
lowercase__ = num_hidden_layers
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = num_attention_heads
lowercase__ = hidden_dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = feat_proj_dropout
lowercase__ = final_dropout
lowercase__ = layerdrop
lowercase__ = layer_norm_eps
lowercase__ = initializer_range
lowercase__ = vocab_size
lowercase__ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase__ = mask_time_prob
lowercase__ = mask_time_length
lowercase__ = mask_time_min_masks
lowercase__ = mask_feature_prob
lowercase__ = mask_feature_length
lowercase__ = mask_feature_min_masks
# ctc loss
lowercase__ = ctc_loss_reduction
lowercase__ = ctc_zero_infinity
# adapter
lowercase__ = add_adapter
lowercase__ = adapter_kernel_size
lowercase__ = adapter_stride
lowercase__ = num_adapter_layers
lowercase__ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowercase__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowercase__ = list(_UpperCAmelCase )
lowercase__ = list(_UpperCAmelCase )
lowercase__ = list(_UpperCAmelCase )
lowercase__ = xvector_output_dim
@property
def snake_case__ ( self ):
'''simple docstring'''
return math.prod(self.conv_stride )
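# Illustrative note: with the default conv_stride of (5, 2, 2, 2, 2, 2, 2), the
# property above returns 5 * 2**6 = 320, i.e. one feature frame per 320 input samples.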
| 668 |
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class a__ ( unittest.TestCase ):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = hf_hub_download(
repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" )
lowercase__ = VideoClassificationPipeline(model=_UpperCAmelCase, image_processor=_UpperCAmelCase, top_k=2 )
lowercase__ = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
for example in examples:
lowercase__ = video_classifier(_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase, [
{"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )},
{"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )},
], )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
lowercase__ = VideoMAEFeatureExtractor(
size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10} )
lowercase__ = pipeline(
"video-classification", model=_UpperCAmelCase, feature_extractor=_UpperCAmelCase, frame_sampling_rate=4 )
lowercase__ = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" )
lowercase__ = video_classifier(_UpperCAmelCase, top_k=2 )
self.assertEqual(
nested_simplify(_UpperCAmelCase, decimals=4 ), [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], )
lowercase__ = video_classifier(
[
video_file_path,
video_file_path,
], top_k=2, )
self.assertEqual(
nested_simplify(_UpperCAmelCase, decimals=4 ), [
[{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
[{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
], )
@require_tf
def snake_case__ ( self ):
'''simple docstring'''
pass
| 668 | 1 |
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class a__ ( datasets.BuilderConfig ):
    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class a__ ( datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = JsonConfig
def snake_case__ ( self ):
'''simple docstring'''
if self.config.block_size is not None:
logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead" )
lowercase__ = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore." )
if self.config.newlines_in_values is not None:
raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported" )
return datasets.DatasetInfo(features=self.config.features )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
lowercase__ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_UpperCAmelCase, (str, list, tuple) ):
lowercase__ = data_files
if isinstance(_UpperCAmelCase, _UpperCAmelCase ):
lowercase__ = [files]
lowercase__ = [dl_manager.iter_files(_UpperCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files} )]
lowercase__ = []
for split_name, files in data_files.items():
if isinstance(_UpperCAmelCase, _UpperCAmelCase ):
lowercase__ = [files]
lowercase__ = [dl_manager.iter_files(_UpperCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_UpperCAmelCase, gen_kwargs={"files": files} ) )
return splits
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
lowercase__ = self.config.features.arrow_schema.field(_UpperCAmelCase ).type
lowercase__ = pa_table.append_column(_UpperCAmelCase, pa.array([None] * len(_UpperCAmelCase ), type=_UpperCAmelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
lowercase__ = table_cast(_UpperCAmelCase, self.config.features.arrow_schema )
return pa_table
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
for file_idx, file in enumerate(itertools.chain.from_iterable(_UpperCAmelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(_UpperCAmelCase, encoding=self.config.encoding, errors=self.config.encoding_errors ) as f:
lowercase__ = json.load(_UpperCAmelCase )
# We keep only the field we are interested in
lowercase__ = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(_UpperCAmelCase, (list, tuple) ):
lowercase__ = set().union(*[row.keys() for row in dataset] )
lowercase__ = {col: [row.get(_UpperCAmelCase ) for row in dataset] for col in keys}
else:
lowercase__ = dataset
lowercase__ = pa.Table.from_pydict(_UpperCAmelCase )
yield file_idx, self._cast_table(_UpperCAmelCase )
# If the file has one json object per line
else:
with open(_UpperCAmelCase, "rb" ) as f:
                    batch_idx = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10 )
lowercase__ = (
self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
)
while True:
lowercase__ = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(_UpperCAmelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
lowercase__ = batch.decode(self.config.encoding, errors=_UpperCAmelCase ).encode("utf-8" )
try:
while True:
try:
lowercase__ = paj.read_json(
io.BytesIO(_UpperCAmelCase ), read_options=paj.ReadOptions(block_size=_UpperCAmelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(_UpperCAmelCase, pa.ArrowInvalid )
and "straddling" not in str(_UpperCAmelCase )
or block_size > len(_UpperCAmelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F'''Batch of {len(_UpperCAmelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.''' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
_UpperCAmelCase, encoding=self.config.encoding, errors=self.config.encoding_errors ) as f:
lowercase__ = json.load(_UpperCAmelCase )
except json.JSONDecodeError:
logger.error(F'''Failed to read file \'{file}\' with error {type(_UpperCAmelCase )}: {e}''' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(_UpperCAmelCase, _UpperCAmelCase ): # list is the only sequence type supported in JSON
try:
lowercase__ = set().union(*[row.keys() for row in dataset] )
lowercase__ = {col: [row.get(_UpperCAmelCase ) for row in dataset] for col in keys}
lowercase__ = pa.Table.from_pydict(_UpperCAmelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(_UpperCAmelCase )}: {e}''' )
raise ValueError(F'''Not able to read records in the JSON file at {file}.''' ) from None
yield file_idx, self._cast_table(_UpperCAmelCase )
break
else:
logger.error(F'''Failed to read file \'{file}\' with error {type(_UpperCAmelCase )}: {e}''' )
raise ValueError(
F'''Not able to read records in the JSON file at {file}. '''
F'''You should probably indicate the field of the JSON file containing your records. '''
F'''This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '''
F'''Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ''' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_UpperCAmelCase )
batch_idx += 1
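# Illustrative sketch (an assumption, not part of the builder): the chunked-read
# loop above always completes the current line after reading `chunksize` bytes,
# so a JSON record is never split across two batches. A minimal standalone version:
#
#     with open("data.jsonl", "rb") as f:  # hypothetical file name
#         while True:
#             batch = f.read(10 << 20)     # read up to ~10MB
#             if not batch:
#                 break
#             batch += f.readline()        # finish the straddling record
#             ...                          # parse `batch` as newline-delimited JSON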
| 668 |
"""simple docstring"""
import itertools
import math
def is_prime( number ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def prime_generator( ):
    '''simple docstring'''
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution( nth = 1_00_01 ):
    '''simple docstring'''
    return next(itertools.islice(prime_generator() , nth - 1 , nth ) )
if __name__ == "__main__":
print(F'{solution() = }')
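    # Illustrative sanity check (not part of the original solution): every prime
    # p > 3 satisfies p % 6 in {1, 5}, which is the fact is_prime relies on.
    assert all(is_prime(p) and p % 6 in (1, 5) for p in (5, 7, 11, 13, 1_00_07))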
| 668 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowerCAmelCase_: int = logging.get_logger(__name__)
lowerCAmelCase_: Any = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class a__ ( PretrainedConfig ):
snake_case_ = "codegen"
snake_case_ = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self, _UpperCAmelCase=5_0400, _UpperCAmelCase=2048, _UpperCAmelCase=2048, _UpperCAmelCase=4096, _UpperCAmelCase=28, _UpperCAmelCase=16, _UpperCAmelCase=64, _UpperCAmelCase=None, _UpperCAmelCase="gelu_new", _UpperCAmelCase=0.0, _UpperCAmelCase=0.0, _UpperCAmelCase=0.0, _UpperCAmelCase=1E-5, _UpperCAmelCase=0.02, _UpperCAmelCase=True, _UpperCAmelCase=5_0256, _UpperCAmelCase=5_0256, _UpperCAmelCase=False, **_UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = vocab_size
lowercase__ = n_ctx
lowercase__ = n_positions
lowercase__ = n_embd
lowercase__ = n_layer
lowercase__ = n_head
lowercase__ = n_inner
lowercase__ = rotary_dim
lowercase__ = activation_function
lowercase__ = resid_pdrop
lowercase__ = embd_pdrop
lowercase__ = attn_pdrop
lowercase__ = layer_norm_epsilon
lowercase__ = initializer_range
lowercase__ = use_cache
lowercase__ = bos_token_id
lowercase__ = eos_token_id
super().__init__(
bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, tie_word_embeddings=_UpperCAmelCase, **_UpperCAmelCase )
class a__ ( OnnxConfigWithPast ):
def __init__( self, _UpperCAmelCase, _UpperCAmelCase = "default", _UpperCAmelCase = None, _UpperCAmelCase = False, ):
'''simple docstring'''
super().__init__(_UpperCAmelCase, task=_UpperCAmelCase, patching_specs=_UpperCAmelCase, use_past=_UpperCAmelCase )
if not getattr(self._config, "pad_token_id", _UpperCAmelCase ):
# TODO: how to do that better?
lowercase__ = 0
@property
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(_UpperCAmelCase, direction="inputs" )
lowercase__ = {0: "batch", 1: "past_sequence + sequence"}
else:
lowercase__ = {0: "batch", 1: "sequence"}
return common_inputs
@property
def snake_case__ ( self ):
'''simple docstring'''
return self._config.n_layer
@property
def snake_case__ ( self ):
'''simple docstring'''
return self._config.n_head
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = -1, _UpperCAmelCase = -1, _UpperCAmelCase = False, _UpperCAmelCase = None, ):
'''simple docstring'''
lowercase__ = super(_UpperCAmelCase, self ).generate_dummy_inputs(
_UpperCAmelCase, batch_size=_UpperCAmelCase, seq_length=_UpperCAmelCase, is_pair=_UpperCAmelCase, framework=_UpperCAmelCase )
        # We need to order the inputs in the way they appear in forward()
lowercase__ = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
lowercase__ , lowercase__ = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
lowercase__ = seqlen + 2
lowercase__ = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowercase__ = [
(torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) for _ in range(self.num_layers )
]
lowercase__ = common_inputs["attention_mask"]
if self.use_past:
lowercase__ = ordered_inputs["attention_mask"].dtype
lowercase__ = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(_UpperCAmelCase, _UpperCAmelCase, dtype=_UpperCAmelCase )], dim=1 )
return ordered_inputs
@property
def snake_case__ ( self ):
'''simple docstring'''
return 13
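# Illustrative sketch (mirroring generate_dummy_inputs above, with CodeGen's
# default sizes taken as assumptions): each of the n_layer dummy past entries is
# a (key, value) pair of shape (batch, n_head, past_len, n_embd // n_head):
#
#     import torch
#     batch, seqlen, n_head, n_layer, n_embd = 2, 8, 16, 28, 4096
#     past_shape = (batch, n_head, seqlen + 2, n_embd // n_head)
#     past_key_values = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(n_layer)]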
| 668 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class a__ ( AbstractDatasetReader ):
def __init__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, _UpperCAmelCase = None, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
_UpperCAmelCase, split=_UpperCAmelCase, features=_UpperCAmelCase, cache_dir=_UpperCAmelCase, keep_in_memory=_UpperCAmelCase, streaming=_UpperCAmelCase, num_proc=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = path_or_paths if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else {self.split: path_or_paths}
lowercase__ = Text(
cache_dir=_UpperCAmelCase, data_files=_UpperCAmelCase, features=_UpperCAmelCase, **_UpperCAmelCase, )
def snake_case__ ( self ):
'''simple docstring'''
if self.streaming:
lowercase__ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = None
self.builder.download_and_prepare(
download_config=_UpperCAmelCase, download_mode=_UpperCAmelCase, verification_mode=_UpperCAmelCase, base_path=_UpperCAmelCase, num_proc=self.num_proc, )
lowercase__ = self.builder.as_dataset(
split=self.split, verification_mode=_UpperCAmelCase, in_memory=self.keep_in_memory )
return dataset
| 668 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase_: Any = {
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class a__ ( PretrainedConfig ):
snake_case_ = "nllb-moe"
snake_case_ = ["past_key_values"]
snake_case_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self, _UpperCAmelCase=12_8112, _UpperCAmelCase=1024, _UpperCAmelCase=12, _UpperCAmelCase=4096, _UpperCAmelCase=16, _UpperCAmelCase=12, _UpperCAmelCase=4096, _UpperCAmelCase=16, _UpperCAmelCase=0.05, _UpperCAmelCase=0.05, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase="relu", _UpperCAmelCase=1024, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=0.0, _UpperCAmelCase=0.02, _UpperCAmelCase=2, _UpperCAmelCase=True, _UpperCAmelCase=False, _UpperCAmelCase="float32", _UpperCAmelCase=False, _UpperCAmelCase=128, _UpperCAmelCase=64, _UpperCAmelCase=4, _UpperCAmelCase=4, _UpperCAmelCase=0.001, _UpperCAmelCase=0.001, _UpperCAmelCase="all", _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=1.0, _UpperCAmelCase=0.2, _UpperCAmelCase=1, _UpperCAmelCase=0, _UpperCAmelCase=2, _UpperCAmelCase=False, **_UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = vocab_size
lowercase__ = max_position_embeddings
lowercase__ = d_model
lowercase__ = encoder_ffn_dim
lowercase__ = encoder_layers
lowercase__ = encoder_attention_heads
lowercase__ = decoder_ffn_dim
lowercase__ = decoder_layers
lowercase__ = decoder_attention_heads
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = activation_function
lowercase__ = init_std
lowercase__ = encoder_layerdrop
lowercase__ = decoder_layerdrop
lowercase__ = use_cache
lowercase__ = encoder_layers
lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase__ = router_z_loss_coef
lowercase__ = router_aux_loss_coef
lowercase__ = decoder_sparse_step
lowercase__ = encoder_sparse_step
lowercase__ = num_experts
lowercase__ = expert_capacity
lowercase__ = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
lowercase__ = router_dtype
lowercase__ = router_ignore_padding_tokens
lowercase__ = batch_prioritized_routing
lowercase__ = second_expert_policy
lowercase__ = normalize_router_prob_before_dropping
lowercase__ = moe_eval_capacity_token_fraction
lowercase__ = moe_token_dropout
lowercase__ = output_router_logits
super().__init__(
pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, is_encoder_decoder=_UpperCAmelCase, decoder_start_token_id=_UpperCAmelCase, **_UpperCAmelCase, )
| 668 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 1_6
EVAL_BATCH_SIZE = 3_2
def __a ( A , A = 16 , A = "bert-base-cased" ):
'''simple docstring'''
lowercase__ = AutoTokenizer.from_pretrained(A )
lowercase__ = load_dataset("glue" , "mrpc" )
def tokenize_function(A ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ = datasets.map(
A , batched=A , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=A )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(A ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A , padding="max_length" , max_length=1_28 , return_tensors="pt" )
return tokenizer.pad(A , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
lowercase__ = DataLoader(
tokenized_datasets["train"] , shuffle=A , collate_fn=A , batch_size=A )
lowercase__ = DataLoader(
tokenized_datasets["validation"] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
def training_function( config , args ):
    '''simple docstring'''
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    model_name_or_path = args.model_name_or_path
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size , model_name_or_path )
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load("glue" , "mrpc" )
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch , num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
                overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            # It is slightly faster to call this once than multiple times
            predictions , references = accelerator.gather(
                (predictions, batch["labels"]) )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader ) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''' , eval_metric )
        performance_metric[f'''epoch-{epoch}'''] = eval_metric["accuracy"]
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f:
            json.dump(performance_metric , f )
def main( ):
'''simple docstring'''
    parser = argparse.ArgumentParser(description="Simple example of a training script tracking model performance." )
parser.add_argument(
"--model_name_or_path" , type=A , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=A , )
parser.add_argument(
"--output_dir" , type=A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--performance_lower_bound" , type=A , default=A , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , )
parser.add_argument(
"--num_epochs" , type=A , default=3 , help="Number of train epochs." , )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
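# Note on the training loop above: the loss is divided by
# gradient_accumulation_steps and optimizer.step() runs only every
# gradient_accumulation_steps batches, so the effective batch size is
# batch_size * gradient_accumulation_steps (illustrative: 16 * 4 = 64).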
| 668 | 1 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
lowerCAmelCase_: Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 1_2_8_0_2_2
FR_CODE = 1_2_8_0_2_8
@require_sentencepiece
class a__ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
lowercase__ = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
lowercase__ = dict(zip(_UpperCAmelCase, range(len(_UpperCAmelCase ) ) ) )
lowercase__ = Path(self.tmpdirname )
save_json(_UpperCAmelCase, save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(_UpperCAmelCase, save_dir / VOCAB_FILES_NAMES["spm_file"] )
lowercase__ = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "</s>"
lowercase__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ), _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0], "</s>" )
self.assertEqual(vocab_keys[1], "<unk>" )
self.assertEqual(vocab_keys[-1], "<s>" )
self.assertEqual(len(_UpperCAmelCase ), tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCAmelCase, ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ), [2, 3, 4, 5, 6], )
lowercase__ = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(_UpperCAmelCase, ["▁This", "▁is", "▁a", "▁t", "est"] )
lowercase__ = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, "This is a test" )
@slow
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = {"input_ids": [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase, model_name="facebook/m2m100_418M", revision="c168bae485c864188cf9aa0e4108b0b6934dc91e", )
@require_torch
@require_sentencepiece
@require_tokenizers
class a__ ( unittest.TestCase ):
snake_case_ = "facebook/m2m100_418M"
snake_case_ = [
"In my opinion, there are two levels of response from the French government.",
"NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
]
snake_case_ = [
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
]
# fmt: off
snake_case_ = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def snake_case__ ( cls ):
'''simple docstring'''
        cls.tokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr" )
        cls.pad_token_id = 1
return cls
def snake_case__ ( self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id("ar" ), 12_8006 )
self.assertEqual(self.tokenizer.get_lang_id("en" ), 12_8022 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ), 12_8076 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ), 12_8063 )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.tokenizer.get_vocab()
self.assertEqual(len(_UpperCAmelCase ), self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"], 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ), _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "en"
lowercase__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens, _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids )
# fmt: off
lowercase__ = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2]
# fmt: on
lowercase__ = self.tokenizer.decode(_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase )
lowercase__ = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertNotIn(self.tokenizer.eos_token, _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = tempfile.mkdtemp()
lowercase__ = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = MaMaaaTokenizer.from_pretrained(_UpperCAmelCase )
self.assertDictEqual(new_tok.lang_token_to_id, _UpperCAmelCase )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "en"
lowercase__ = "fr"
lowercase__ = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=_UpperCAmelCase, return_tensors="pt" )
lowercase__ = shift_tokens_right(
batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id )
for k in batch:
lowercase__ = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
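        # i.e. shift_tokens_right prepends the eos token (id 2) as the decoder
        # start token, so decoder_input_ids begin [eos, lang_code, ...] while
        # the labels end with eos.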
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] )
lowercase__ = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
lowercase__ = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar" )
self.assertEqual(
nested_simplify(_UpperCAmelCase ), {
# en_XX, A, test, EOS
"input_ids": [[12_8022, 58, 4183, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 12_8006,
}, )
| 668 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a__ ( SchedulerCommonTest ):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = {"num_train_timesteps": 1000}
config.update(**_UpperCAmelCase )
return config
    def snake_case__ ( self, time_step=0, **_UpperCAmelCase ):
'''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config(**_UpperCAmelCase )
lowercase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
if time_step is None:
lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase )
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
if time_step is None:
lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**_UpperCAmelCase )
lowercase__ = scheduler_class(**_UpperCAmelCase )
lowercase__ = 10
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample
return sample
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(_UpperCAmelCase, "set_timesteps" ):
scheduler.set_timesteps(_UpperCAmelCase )
elif num_inference_steps is not None and not hasattr(_UpperCAmelCase, "set_timesteps" ):
lowercase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.timesteps[5]
lowercase__ = scheduler.timesteps[6]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def snake_case__ ( self ):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase, time_step=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ):
self.check_over_forward(num_inference_steps=_UpperCAmelCase, time_step=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.full_loop()
lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_mean.item() - 254_0529 ) < 10
| 668 | 1 |
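# A minimal, standalone sketch of the save/reload round-trip the tests above exercise,
# assuming `diffusers` and `torch` are installed; the step count is arbitrary, the point
# is that the reloaded scheduler reproduces the same timestep grid.
import tempfile

import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)

with tempfile.TemporaryDirectory() as tmpdirname:
    scheduler.save_config(tmpdirname)
    reloaded = IPNDMScheduler.from_pretrained(tmpdirname)

reloaded.set_timesteps(50)
assert torch.equal(scheduler.timesteps, reloaded.timesteps)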
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_: Optional[int] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class a__ ( _a , unittest.TestCase ):
snake_case_ = XLMProphetNetTokenizer
snake_case_ = False
snake_case_ = True
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = XLMProphetNetTokenizer(_UpperCAmelCase, keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "[PAD]"
lowercase__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ), _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], "[PAD]" )
self.assertEqual(vocab_keys[1], "[CLS]" )
self.assertEqual(vocab_keys[-1], "j" )
self.assertEqual(len(_UpperCAmelCase ), 1012 )
def snake_case__ ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 1012 )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = XLMProphetNetTokenizer(_UpperCAmelCase, keep_accents=_UpperCAmelCase )
lowercase__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCAmelCase, ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
lowercase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCAmelCase, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
], )
lowercase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
], )
lowercase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
], )
@cached_property
def snake_case__ ( self ):
'''simple docstring'''
return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )
@slow
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "Hello World!"
lowercase__ = [3_5389, 6672, 49, 2]
self.assertListEqual(_UpperCAmelCase, self.big_tokenizer.encode(_UpperCAmelCase ) )
@slow
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = {"input_ids": [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase, model_name="microsoft/xprophetnet-large-wiki100-cased", revision="1acad1643ddd54a44df6a1b797ada8373685d90e", )
| 668 |
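# A quick usage sketch of the tokenizer under test, assuming the checkpoint above is
# downloadable; the expected ids come directly from the slow test above.
from transformers import XLMProphetNetTokenizer

tokenizer = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
ids = tokenizer.encode("Hello World!")
assert ids == [35389, 6672, 49, 2]
print(tokenizer.decode(ids, skip_special_tokens=True))  # "Hello World!"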
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( _a , unittest.TestCase ):
snake_case_ = MgpstrTokenizer
snake_case_ = False
snake_case_ = {}
snake_case_ = False
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
# fmt: off
lowercase__ = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
lowercase__ = dict(zip(_UpperCAmelCase, range(len(_UpperCAmelCase ) ) ) )
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + "\n" )
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = "tester"
lowercase__ = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
lowercase__ = tokenizer.encode([special_token], add_special_tokens=_UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ), 1 )
lowercase__ = tokenizer.decode(_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase )
self.assertTrue(special_token not in decoded )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ , lowercase__ = self.get_input_output_texts(_UpperCAmelCase )
lowercase__ = tokenizer.tokenize(_UpperCAmelCase )
lowercase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertNotEqual(len(_UpperCAmelCase ), 0 )
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
self.assertEqual(text_a.replace(" ", "" ), _UpperCAmelCase )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def snake_case__ ( self ):
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def snake_case__ ( self ):
'''simple docstring'''
pass
| 668 | 1 |
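# A self-contained sketch of the fixture pattern above, assuming `transformers` is
# installed: write a character-level vocab to disk and tokenize with it.
import json
import os
import tempfile

from transformers import MgpstrTokenizer

vocab = ["[GO]", "[s]"] + list("0123456789abcdefghijklmnopqrstuvwxyz")
with tempfile.TemporaryDirectory() as tmpdir:
    vocab_file = os.path.join(tmpdir, "vocab.json")
    with open(vocab_file, "w", encoding="utf-8") as fp:
        json.dump({tok: i for i, tok in enumerate(vocab)}, fp)
    tokenizer = MgpstrTokenizer(vocab_file)
    print(tokenizer("tester")["input_ids"])  # one id per character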
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a__ ( unittest.TestCase ):
def snake_case__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = 1
lowercase__ = 3
lowercase__ = (32, 32)
lowercase__ = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0 ) ).to(_UpperCAmelCase )
return image
@property
def snake_case__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
return model
@property
def snake_case__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
return model
@property
def snake_case__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = RobertaSeriesConfig(
hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5006, )
return RobertaSeriesModelWithTransformation(_UpperCAmelCase )
@property
def snake_case__ ( self ):
'''simple docstring'''
def extract(*_UpperCAmelCase, **_UpperCAmelCase ):
class a__ :
def __init__( self ):
'''simple docstring'''
lowercase__ = torch.ones([0] )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
self.pixel_values.to(_UpperCAmelCase )
return self
return Out()
return extract
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.dummy_cond_unet
lowercase__ = PNDMScheduler(skip_prk_steps=_UpperCAmelCase )
lowercase__ = self.dummy_vae
lowercase__ = self.dummy_text_encoder
lowercase__ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowercase__ = 77
lowercase__ = self.dummy_image.to(_UpperCAmelCase )
lowercase__ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowercase__ = AltDiffusionImgaImgPipeline(
unet=_UpperCAmelCase, scheduler=_UpperCAmelCase, vae=_UpperCAmelCase, text_encoder=_UpperCAmelCase, tokenizer=_UpperCAmelCase, safety_checker=_UpperCAmelCase, feature_extractor=self.dummy_extractor, )
lowercase__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=_UpperCAmelCase )
lowercase__ = alt_pipe.to(_UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = "A painting of a squirrel eating a burger"
lowercase__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
lowercase__ = alt_pipe(
[prompt], generator=_UpperCAmelCase, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=_UpperCAmelCase, )
lowercase__ = output.images
lowercase__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
lowercase__ = alt_pipe(
[prompt], generator=_UpperCAmelCase, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=_UpperCAmelCase, return_dict=_UpperCAmelCase, )[0]
lowercase__ = image[0, -3:, -3:, -1]
lowercase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase__ = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != "cuda", "This test requires a GPU" )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.dummy_cond_unet
lowercase__ = PNDMScheduler(skip_prk_steps=_UpperCAmelCase )
lowercase__ = self.dummy_vae
lowercase__ = self.dummy_text_encoder
lowercase__ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowercase__ = 77
lowercase__ = self.dummy_image.to(_UpperCAmelCase )
# put models in fp16
lowercase__ = unet.half()
lowercase__ = vae.half()
lowercase__ = bert.half()
# make sure here that pndm scheduler skips prk
lowercase__ = AltDiffusionImgaImgPipeline(
unet=_UpperCAmelCase, scheduler=_UpperCAmelCase, vae=_UpperCAmelCase, text_encoder=_UpperCAmelCase, tokenizer=_UpperCAmelCase, safety_checker=_UpperCAmelCase, feature_extractor=self.dummy_extractor, )
lowercase__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=_UpperCAmelCase )
lowercase__ = alt_pipe.to(_UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = "A painting of a squirrel eating a burger"
lowercase__ = torch.manual_seed(0 )
lowercase__ = alt_pipe(
[prompt], generator=_UpperCAmelCase, num_inference_steps=2, output_type="np", image=_UpperCAmelCase, ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda", "This test requires a GPU" )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase__ = init_image.resize((760, 504) )
lowercase__ = "BAAI/AltDiffusion"
lowercase__ = AltDiffusionImgaImgPipeline.from_pretrained(
_UpperCAmelCase, safety_checker=_UpperCAmelCase, )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase__ = "A fantasy landscape, trending on artstation"
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe(
prompt=_UpperCAmelCase, image=_UpperCAmelCase, strength=0.75, guidance_scale=7.5, generator=_UpperCAmelCase, output_type="np", )
lowercase__ = output.images[0]
lowercase__ = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
lowercase__ = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def snake_case__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
lowercase__ = init_image.resize((768, 512) )
lowercase__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
lowercase__ = "BAAI/AltDiffusion"
lowercase__ = AltDiffusionImgaImgPipeline.from_pretrained(
_UpperCAmelCase, safety_checker=_UpperCAmelCase, )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase__ = "A fantasy landscape, trending on artstation"
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe(
prompt=_UpperCAmelCase, image=_UpperCAmelCase, strength=0.75, guidance_scale=7.5, generator=_UpperCAmelCase, output_type="np", )
lowercase__ = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 668 |
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 668 | 1 |
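# The consumer-side counterpart to the guards above, as a sketch: probe for the optional
# `note_seq` backend and degrade cleanly when it is missing.
try:
    import note_seq  # optional dependency guarded above
except ImportError:
    note_seq = None

if note_seq is None:
    print("note_seq is not installed; MIDI preprocessing is unavailable.")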
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase=12, _UpperCAmelCase=7, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase=99, _UpperCAmelCase=32, _UpperCAmelCase=32, _UpperCAmelCase=2, _UpperCAmelCase=4, _UpperCAmelCase=37, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=0.02, _UpperCAmelCase=0, _UpperCAmelCase=None, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = projection_dim
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = max_position_embeddings
lowercase__ = initializer_range
lowercase__ = scope
lowercase__ = bos_token_id
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
lowercase__ = input_mask.numpy()
lowercase__ , lowercase__ = input_mask.shape
lowercase__ = np.random.randint(1, seq_length - 1, size=(batch_size,) )
for batch_idx, start_index in enumerate(_UpperCAmelCase ):
lowercase__ = 1
lowercase__ = 0
lowercase__ = self.get_config()
return config, input_ids, tf.convert_to_tensor(_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = TFBlipTextModel(config=_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, training=_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase, training=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class a__ ( _a , unittest.TestCase ):
snake_case_ = (TFBlipTextModel,) if is_tf_available() else ()
snake_case_ = False
snake_case_ = False
snake_case_ = False
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = BlipTextModelTester(self )
lowercase__ = ConfigTester(self, config_class=_UpperCAmelCase, hidden_size=37 )
def snake_case__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="Blip does not use inputs_embeds" )
def snake_case__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def snake_case__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def snake_case__ ( self ):
'''simple docstring'''
pass
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = TFBlipTextModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase=True ):
'''simple docstring'''
super().test_pt_tf_model_equivalence(allow_missing_keys=_UpperCAmelCase )
| 668 |
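# A minimal forward-pass sketch with a tiny config, assuming TensorFlow and
# `transformers` are installed; the sizes mirror the tester defaults above.
import tensorflow as tf
from transformers import BlipTextConfig, TFBlipTextModel

config = BlipTextConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
)
model = TFBlipTextModel(config)
input_ids = tf.random.uniform((12, 7), maxval=99, dtype=tf.int32)
outputs = model(input_ids, training=False)
print(outputs.last_hidden_state.shape)  # (12, 7, 32)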
"""simple docstring"""
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Return True if the matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient v* A v / (v* v) of a Hermitian matrix A and vector v."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 668 | 1 |
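# Numerical check of the defining property, assuming the functions above are in scope:
# for a Hermitian matrix, the Rayleigh quotient of any nonzero vector lies between the
# smallest and largest eigenvalues.
import numpy as np

a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
v = np.array([[1], [2], [3]])
r = rayleigh_quotient(a, v).real.item()
eigs = np.linalg.eigvalsh(a)  # ascending, real for Hermitian input
assert eigs[0] - 1e-9 <= r <= eigs[-1] + 1e-9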
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase=13, _UpperCAmelCase=7, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase=99, _UpperCAmelCase=[1, 1, 2], _UpperCAmelCase=1, _UpperCAmelCase=32, _UpperCAmelCase=4, _UpperCAmelCase=8, _UpperCAmelCase=37, _UpperCAmelCase="gelu_new", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=0.0, _UpperCAmelCase=512, _UpperCAmelCase=3, _UpperCAmelCase=0.02, _UpperCAmelCase=3, _UpperCAmelCase=4, _UpperCAmelCase=None, _UpperCAmelCase=False, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = block_sizes
lowercase__ = num_decoder_layers
lowercase__ = d_model
lowercase__ = n_head
lowercase__ = d_head
lowercase__ = d_inner
lowercase__ = hidden_act
lowercase__ = hidden_dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = 2
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
lowercase__ = initializer_std
# Used in the tests to check the size of the first attention layer
lowercase__ = n_head
# Used in the tests to check the size of the first hidden state
lowercase__ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowercase__ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowercase__ = self.num_hidden_layers + 2
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowercase__ = ids_tensor([self.batch_size], self.num_choices )
lowercase__ = FunnelConfig(
vocab_size=self.vocab_size, block_sizes=self.block_sizes, num_decoder_layers=self.num_decoder_layers, d_model=self.d_model, n_head=self.n_head, d_head=self.d_head, d_inner=self.d_inner, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, activation_dropout=self.activation_dropout, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_std=self.initializer_std, )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = TFFunnelModel(config=_UpperCAmelCase )
lowercase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowercase__ = model(_UpperCAmelCase )
lowercase__ = [input_ids, input_mask]
lowercase__ = model(_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model) )
lowercase__ = False
lowercase__ = TFFunnelModel(config=_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model) )
lowercase__ = False
lowercase__ = TFFunnelModel(config=_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = TFFunnelBaseModel(config=_UpperCAmelCase )
lowercase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowercase__ = model(_UpperCAmelCase )
lowercase__ = [input_ids, input_mask]
lowercase__ = model(_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model) )
lowercase__ = False
lowercase__ = TFFunnelBaseModel(config=_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model) )
lowercase__ = False
lowercase__ = TFFunnelBaseModel(config=_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = TFFunnelForPreTraining(config=_UpperCAmelCase )
lowercase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowercase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = TFFunnelForMaskedLM(config=_UpperCAmelCase )
lowercase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowercase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = TFFunnelForSequenceClassification(config=_UpperCAmelCase )
lowercase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowercase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = self.num_choices
lowercase__ = TFFunnelForMultipleChoice(config=_UpperCAmelCase )
lowercase__ = tf.tile(tf.expand_dims(_UpperCAmelCase, 1 ), (1, self.num_choices, 1) )
lowercase__ = tf.tile(tf.expand_dims(_UpperCAmelCase, 1 ), (1, self.num_choices, 1) )
lowercase__ = tf.tile(tf.expand_dims(_UpperCAmelCase, 1 ), (1, self.num_choices, 1) )
lowercase__ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
lowercase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = TFFunnelForTokenClassification(config=_UpperCAmelCase )
lowercase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowercase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = TFFunnelForQuestionAnswering(config=_UpperCAmelCase )
lowercase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowercase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def snake_case__ ( self ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class a__ ( _a , _a , unittest.TestCase ):
snake_case_ = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case_ = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case_ = False
snake_case_ = False
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = TFFunnelModelTester(self )
lowercase__ = ConfigTester(self, config_class=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
@require_tf
class a__ ( _a , unittest.TestCase ):
snake_case_ = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
snake_case_ = False
snake_case_ = False
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = TFFunnelModelTester(self, base=_UpperCAmelCase )
lowercase__ = ConfigTester(self, config_class=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
| 668 |
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class a__ ( _a , unittest.TestCase ):
snake_case_ = PriorTransformer
snake_case_ = "hidden_states"
@property
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = 4
lowercase__ = 8
lowercase__ = 7
lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def snake_case__ ( self, _UpperCAmelCase=0 ):
'''simple docstring'''
torch.manual_seed(_UpperCAmelCase )
lowercase__ = 4
lowercase__ = 8
lowercase__ = 7
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def snake_case__ ( self ):
'''simple docstring'''
return (4, 8)
@property
def snake_case__ ( self ):
'''simple docstring'''
return (4, 8)
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = {
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_layers": 2,
"embedding_dim": 8,
"num_embeddings": 7,
"additional_embeddings": 4,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = PriorTransformer.from_pretrained(
"hf-internal-testing/prior-dummy", output_loading_info=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertEqual(len(loading_info["missing_keys"] ), 0 )
model.to(_UpperCAmelCase )
lowercase__ = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.prepare_init_args_and_inputs_for_common()
lowercase__ = self.model_class(**_UpperCAmelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["hidden_states", "timestep"]
self.assertListEqual(arg_names[:2], _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" )
lowercase__ = model.to(_UpperCAmelCase )
if hasattr(_UpperCAmelCase, "set_default_attn_processor" ):
model.set_default_attn_processor()
lowercase__ = self.get_dummy_seed_input()
with torch.no_grad():
lowercase__ = model(**_UpperCAmelCase )[0]
lowercase__ = output[0, :5].flatten().cpu()
print(_UpperCAmelCase )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
lowercase__ = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] )
self.assertTrue(torch_all_close(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-2 ) )
@slow
class a__ ( unittest.TestCase ):
def snake_case__ ( self, _UpperCAmelCase=1, _UpperCAmelCase=768, _UpperCAmelCase=77, _UpperCAmelCase=0 ):
'''simple docstring'''
torch.manual_seed(_UpperCAmelCase )
lowercase__ = batch_size
lowercase__ = embedding_dim
lowercase__ = num_embeddings
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def snake_case__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]],
[37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]],
# fmt: on
] )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior" )
model.to(_UpperCAmelCase )
lowercase__ = self.get_dummy_seed_input(seed=_UpperCAmelCase )
with torch.no_grad():
lowercase__ = model(**_UpperCAmelCase )[0]
assert list(sample.shape ) == [1, 768]
lowercase__ = sample[0, :8].flatten().cpu()
print(_UpperCAmelCase )
lowercase__ = torch.tensor(_UpperCAmelCase )
assert torch_all_close(_UpperCAmelCase, _UpperCAmelCase, atol=1E-3 )
| 668 | 1 |
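# A checkpoint-free sketch of the dummy setup above, assuming `diffusers` and `torch`
# are installed: build the tiny prior from its init kwargs and run one denoising call.
import torch
from diffusers import PriorTransformer

model = PriorTransformer(
    num_attention_heads=2, attention_head_dim=4, num_layers=2,
    embedding_dim=8, num_embeddings=7, additional_embeddings=4,
)
out = model(
    hidden_states=torch.randn(4, 8),
    timestep=2,
    proj_embedding=torch.randn(4, 8),
    encoder_hidden_states=torch.randn(4, 7, 8),
).predicted_image_embedding
print(out.shape)  # torch.Size([4, 8])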
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_: List[str] = {
"configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
"tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: List[Any] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Dict = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Optional[Any] = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_: Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 668 |
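# A stdlib-only sketch of the idea behind `_LazyModule`: PEP 562's module-level
# __getattr__ defers the heavy import until an attribute is first touched. The mapping
# below is illustrative; in the real package it is derived from `_import_structure`.
import importlib

_ATTR_TO_SUBMODULE = {"DebertaModel": ".modeling_deberta"}

def __getattr__(name):
    if name in _ATTR_TO_SUBMODULE:
        module = importlib.import_module(_ATTR_TO_SUBMODULE[name], package=__name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")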
"""simple docstring"""
lowerCAmelCase_: Any = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode(data: bytes) -> bytes:
    """Encode a bytes-like object to Base64, mirroring `base64.b64encode`."""
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    """Decode a Base64 string (or ASCII bytes) back to the original bytes."""
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(data)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 668 | 1 |
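# Sanity check of the hand-rolled codec against the standard library, assuming the
# `base64_encode`/`base64_decode` functions above are in scope.
import base64

payload = b"Hello, Base64!"
assert base64_encode(payload) == base64.b64encode(payload)
assert base64_decode(base64_encode(payload)) == payload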
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    """Step the scheduler `num_steps` times and record the learning rate at each step."""
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    """Like `unwrap_schedule`, but save and reload the scheduler state halfway through."""
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)
                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class a__ ( unittest.TestCase ):
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
self.assertEqual(len(_UpperCAmelCase ), len(_UpperCAmelCase ) )
for a, b in zip(_UpperCAmelCase, _UpperCAmelCase ):
self.assertAlmostEqual(_UpperCAmelCase, _UpperCAmelCase, delta=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = torch.tensor([0.1, -0.2, -0.1], requires_grad=_UpperCAmelCase )
lowercase__ = torch.tensor([0.4, 0.2, -0.5] )
lowercase__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
lowercase__ = AdamW(params=[w], lr=2E-1, weight_decay=0.0 )
for _ in range(100 ):
lowercase__ = criterion(_UpperCAmelCase, _UpperCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1E-2 )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = torch.tensor([0.1, -0.2, -0.1], requires_grad=_UpperCAmelCase )
lowercase__ = torch.tensor([0.4, 0.2, -0.5] )
lowercase__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
lowercase__ = Adafactor(
params=[w], lr=1E-2, eps=(1E-30, 1E-3), clip_threshold=1.0, decay_rate=-0.8, betaa=_UpperCAmelCase, weight_decay=0.0, relative_step=_UpperCAmelCase, scale_parameter=_UpperCAmelCase, warmup_init=_UpperCAmelCase, )
for _ in range(1000 ):
lowercase__ = criterion(_UpperCAmelCase, _UpperCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1E-2 )
@require_torch
class a__ ( unittest.TestCase ):
snake_case_ = nn.Linear(50 , 50 ) if is_torch_available() else None
snake_case_ = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
snake_case_ = 10
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=None ):
'''simple docstring'''
self.assertEqual(len(_UpperCAmelCase ), len(_UpperCAmelCase ) )
for a, b in zip(_UpperCAmelCase, _UpperCAmelCase ):
self.assertAlmostEqual(_UpperCAmelCase, _UpperCAmelCase, delta=_UpperCAmelCase, msg=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = {"num_warmup_steps": 2, "num_training_steps": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
lowercase__ = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
lowercase__ , lowercase__ = data
lowercase__ = scheduler_func(self.optimizer, **_UpperCAmelCase )
self.assertEqual(len([scheduler.get_lr()[0]] ), 1 )
lowercase__ = unwrap_schedule(_UpperCAmelCase, self.num_steps )
self.assertListAlmostEqual(
_UpperCAmelCase, _UpperCAmelCase, tol=1E-2, msg=F'''failed for {scheduler_func} in normal scheduler''', )
lowercase__ = scheduler_func(self.optimizer, **_UpperCAmelCase )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(_UpperCAmelCase ) # wrap to test picklability of the schedule
lowercase__ = unwrap_and_save_reload_schedule(_UpperCAmelCase, self.num_steps )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase, msg=F'''failed for {scheduler_func} in save and reload''' )
class LambdaScheduleWrapper:
    """Pickleable wrapper around a schedule function; used to test that schedules survive save/reload."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 668 |
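# Illustrative run of one schedule from the table above, assuming `torch` and
# `transformers` are installed and `unwrap_schedule` from the file above is in scope;
# the printed values should match the expected list to within ~1e-2.
import torch
from transformers import AdamW, get_linear_schedule_with_warmup

param = torch.nn.Parameter(torch.zeros(1))
optimizer = AdamW([param], lr=10.0)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
print(unwrap_schedule(scheduler, num_steps=10))
# ≈ [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]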
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __a ( A , A , A = "x" , A = 10**-10 , A = 1 , ):
'''simple docstring'''
lowercase__ = symbols(A )
lowercase__ = lambdify(A , A )
lowercase__ = lambdify(A , diff(A , A ) )
lowercase__ = starting_point
while True:
if diff_function(A ) != 0:
lowercase__ = prev_guess - multiplicity * func(A ) / diff_function(
A )
else:
raise ZeroDivisionError("Could not find root" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
lowercase__ = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find the fourth root of 5
print(F'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}')
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
F'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
F'{newton_raphson("exp(x) - 1", 1_0, precision=0.005)}',
)
# Find root of cos(x)
print(F'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
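# One more worked example (a sketch): Newton's method on x**2 - 2 starting
# from 1 converges to sqrt(2) in a handful of iterations.
print(F'The root of x**2 - 2 = 0 is {newton_raphson("x**2 - 2", 1)}')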
| 668 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
lowerCAmelCase_: Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
class a__ ( _a ):
def __init__( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_UpperCAmelCase, scheduler=_UpperCAmelCase )
@torch.no_grad()
def __call__( self, _UpperCAmelCase = 1, _UpperCAmelCase = 100, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = True, ):
'''simple docstring'''
if audio_length_in_s is None:
lowercase__ = self.unet.config.sample_size / self.unet.config.sample_rate
lowercase__ = audio_length_in_s * self.unet.config.sample_rate
lowercase__ = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
F'''{audio_length_in_s} is too small. Make sure it\'s greater than or equal to'''
F''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
lowercase__ = int(_UpperCAmelCase )
if sample_size % down_scale_factor != 0:
lowercase__ = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
F'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
F''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
" process." )
lowercase__ = int(_UpperCAmelCase )
lowercase__ = next(iter(self.unet.parameters() ) ).dtype
lowercase__ = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(_UpperCAmelCase, _UpperCAmelCase ) and len(_UpperCAmelCase ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(_UpperCAmelCase )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowercase__ = randn_tensor(_UpperCAmelCase, generator=_UpperCAmelCase, device=self.device, dtype=_UpperCAmelCase )
# set step values
self.scheduler.set_timesteps(_UpperCAmelCase, device=audio.device )
lowercase__ = self.scheduler.timesteps.to(_UpperCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowercase__ = self.unet(_UpperCAmelCase, _UpperCAmelCase ).sample
# 2. compute the previous sample: x_t -> x_t-1
lowercase__ = self.scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample
lowercase__ = audio.clamp(-1, 1 ).float().cpu().numpy()
lowercase__ = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=_UpperCAmelCase )
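# Hedged usage sketch (the checkpoint id is an assumption; any unconditional
# 1-D audio diffusion checkpoint with a matching UNet/scheduler pair works):
# pipe = a__.from_pretrained("harmonai/maestro-150k")
# audio = pipe(batch_size=1, num_inference_steps=100, audio_length_in_s=4.0).audios[0]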
| 668 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_: Union[str, Any] = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Union[str, Any] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Any = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Tuple = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Optional[Any] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase_: Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
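# Illustrative note (a sketch): with this layout, importing the package stays
# cheap; the torch/TF/Flax submodules listed above are only imported by
# _LazyModule on first access to one of their names.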
| 668 | 1 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
lowerCAmelCase_: Dict = logging.get_logger(__name__)
def __a ( A , A ):
'''simple docstring'''
def run_func(A ):
@wraps(A )
def run_in_eager_mode(*A , **A ):
return func(*A , **A )
@wraps(A )
@tf.function(experimental_compile=A )
def run_in_graph_mode(*A , **A ):
return func(*A , **A )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def __a ( A , A , A ):
'''simple docstring'''
lowercase__ = random.Random()
lowercase__ = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(A , shape=(batch_size, sequence_length) , dtype=tf.intaa )
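# Quick sanity sketch for the helper above: `random_input_ids(2, 8, 100)`
# returns a (2, 8) int32 tensor of token ids drawn uniformly from [0, 99].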
class a__ ( _a ):
snake_case_ = 42
snake_case_ = 42
snake_case_ = "TensorFlow"
@property
def snake_case__ ( self ):
'''simple docstring'''
return tf.__version__
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
lowercase__ = self._prepare_inference_func(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
return self._measure_speed(_inference )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
lowercase__ = self._prepare_train_func(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
return self._measure_speed(_train )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], _UpperCAmelCase )
lowercase__ = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
lowercase__ = self._prepare_inference_func(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
return self._measure_memory(_inference )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], _UpperCAmelCase )
lowercase__ = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
lowercase__ = self._prepare_train_func(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
return self._measure_memory(_train )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
lowercase__ = (
hasattr(_UpperCAmelCase, "architectures" )
and isinstance(config.architectures, _UpperCAmelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
lowercase__ = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
lowercase__ = __import__("transformers", fromlist=[model_class] )
lowercase__ = getattr(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = model_cls(_UpperCAmelCase )
except ImportError:
raise ImportError(
F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
lowercase__ = TF_MODEL_MAPPING[config.__class__](_UpperCAmelCase )
# encoder-decoder has vocab size saved differently
lowercase__ = config.vocab_size if hasattr(_UpperCAmelCase, "vocab_size" ) else config.encoder.vocab_size
lowercase__ = random_input_ids(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_decoder_forward():
return model(_UpperCAmelCase, decoder_input_ids=_UpperCAmelCase, training=_UpperCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_forward():
return model(_UpperCAmelCase, training=_UpperCAmelCase )
lowercase__ = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
lowercase__ = (
hasattr(_UpperCAmelCase, "architectures" )
and isinstance(config.architectures, _UpperCAmelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
lowercase__ = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
lowercase__ = __import__("transformers", fromlist=[model_class] )
lowercase__ = getattr(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = model_cls(_UpperCAmelCase )
except ImportError:
raise ImportError(
F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
lowercase__ = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](_UpperCAmelCase )
# encoder-decoder has vocab size saved differently
lowercase__ = config.vocab_size if hasattr(_UpperCAmelCase, "vocab_size" ) else config.encoder.vocab_size
lowercase__ = random_input_ids(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_decoder_train():
lowercase__ = model(_UpperCAmelCase, decoder_input_ids=_UpperCAmelCase, labels=_UpperCAmelCase, training=_UpperCAmelCase )[0]
lowercase__ = tf.gradients(_UpperCAmelCase, model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_train():
lowercase__ = model(_UpperCAmelCase, labels=_UpperCAmelCase, training=_UpperCAmelCase )[0]
lowercase__ = tf.gradients(_UpperCAmelCase, model.trainable_variables )
return gradients
lowercase__ = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run the model 5 extra times to stabilize compilation for TPU/XLA
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(_UpperCAmelCase, repeat=1, number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
lowercase__ = timeit.repeat(
_UpperCAmelCase, repeat=self.args.repeat, number=10, )
return min(_UpperCAmelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
lowercase__ = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
lowercase__ = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
lowercase__ = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
lowercase__ = nvml.nvmlDeviceGetMemoryInfo(_UpperCAmelCase )
lowercase__ = meminfo.used
lowercase__ = Memory(_UpperCAmelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
lowercase__ = None
else:
lowercase__ = measure_peak_memory_cpu(_UpperCAmelCase )
lowercase__ = Memory(_UpperCAmelCase ) if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
lowercase__ = stop_memory_tracing(_UpperCAmelCase )
if memory is None:
lowercase__ = summary.total
else:
lowercase__ = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )
return "N/A", None
| 668 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase_: Union[str, Any] = logging.get_logger(__name__)
class a__ ( _a ):
snake_case_ = ["audio_values", "audio_mask"]
def __init__( self, _UpperCAmelCase=2048, _UpperCAmelCase=1, _UpperCAmelCase=[16, 16], _UpperCAmelCase=128, _UpperCAmelCase=4_4100, _UpperCAmelCase=86, _UpperCAmelCase=2048, _UpperCAmelCase=0.0, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
feature_size=_UpperCAmelCase, sampling_rate=_UpperCAmelCase, padding_value=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = spectrogram_length
lowercase__ = num_channels
lowercase__ = patch_size
lowercase__ = feature_size // self.patch_size[1]
lowercase__ = n_fft
lowercase__ = sampling_rate // hop_length_to_sampling_rate
lowercase__ = sampling_rate
lowercase__ = padding_value
lowercase__ = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2, num_mel_filters=_UpperCAmelCase, min_frequency=0.0, max_frequency=22_050.0, sampling_rate=_UpperCAmelCase, norm="slaney", mel_scale="slaney", ).T
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = spectrogram(
_UpperCAmelCase, window_function(self.n_fft, "hann" ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0, )
lowercase__ = log_spec[:, :-1]
lowercase__ = log_spec - 20.0
lowercase__ = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0
return log_spec
def __call__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = True, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, **_UpperCAmelCase, ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
F''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowercase__ = isinstance(_UpperCAmelCase, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
lowercase__ = is_batched_numpy or (
isinstance(_UpperCAmelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_UpperCAmelCase, np.ndarray ):
lowercase__ = np.asarray(_UpperCAmelCase, dtype=np.floataa )
elif isinstance(_UpperCAmelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowercase__ = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0], _UpperCAmelCase ):
lowercase__ = [np.asarray(_UpperCAmelCase, dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowercase__ = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowercase__ = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowercase__ = np.array(_UpperCAmelCase ).astype(np.floataa )
# convert into correct format for padding
lowercase__ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowercase__ = np.ones([len(_UpperCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
lowercase__ = padded_audio_features * self.padding_value
for i in range(len(_UpperCAmelCase ) ):
lowercase__ = audio_features[i]
lowercase__ = feature
# return as BatchFeature
if return_attention_mask:
lowercase__ = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
lowercase__ = {"audio_values": padded_audio_features}
lowercase__ = BatchFeature(data=_UpperCAmelCase, tensor_type=_UpperCAmelCase )
return encoded_inputs
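# Hedged usage sketch: one second of silence at the expected 44.1 kHz
# (`a__` is the feature extractor defined above; key names follow its return).
# import numpy as np
# fe = a__()
# out = fe(np.zeros(44_100, dtype=np.float32), sampling_rate=44_100, return_attention_mask=True)
# sorted(out.keys())  -> ["audio_mask", "audio_values"]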
| 668 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def __a ( A , A , A ):
'''simple docstring'''
lowercase__ = MobileBertConfig.from_json_file(A )
print(f'''Building PyTorch model from configuration: {config}''' )
lowercase__ = MobileBertForPreTraining(A )
# Load weights from tf checkpoint
lowercase__ = load_tf_weights_in_mobilebert(A , A , A )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , A )
if __name__ == "__main__":
lowerCAmelCase_: Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCAmelCase_: Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
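# Example invocation (the script name and all paths are placeholders):
# python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
#     --mobilebert_config_file ./mobilebert/config.json \
#     --pytorch_dump_path ./mobilebert/pytorch_model.bin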
| 668 |
"""simple docstring"""
from __future__ import annotations
import math
def __a ( A ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
lowerCAmelCase_: Optional[Any] = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]
def __a ( A ):
'''simple docstring'''
if not isinstance(A , A ):
raise ValueError("n must be an integer" )
if n <= 0:
raise ValueError("n must be >= 0" )
lowercase__ = []
for num in range(len(A ) ):
lowercase__ = 0
while 2 * i * i <= odd_composites[num]:
lowercase__ = odd_composites[num] - 2 * i * i
if is_prime(A ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(A ) == n:
return list_nums
return []
def __a ( ):
'''simple docstring'''
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'{solution() = }')
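# Sanity note (a well-known result): the smallest odd composite that is not a
# prime plus twice a square is 5777, so solution() should print 5777.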
| 668 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: List[Any] = logging.get_logger(__name__)
lowerCAmelCase_: Optional[int] = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class a__ ( _a ):
snake_case_ = "swinv2"
snake_case_ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self, _UpperCAmelCase=224, _UpperCAmelCase=4, _UpperCAmelCase=3, _UpperCAmelCase=96, _UpperCAmelCase=[2, 2, 6, 2], _UpperCAmelCase=[3, 6, 12, 24], _UpperCAmelCase=7, _UpperCAmelCase=4.0, _UpperCAmelCase=True, _UpperCAmelCase=0.0, _UpperCAmelCase=0.0, _UpperCAmelCase=0.1, _UpperCAmelCase="gelu", _UpperCAmelCase=False, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-5, _UpperCAmelCase=32, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = embed_dim
lowercase__ = depths
lowercase__ = len(_UpperCAmelCase )
lowercase__ = num_heads
lowercase__ = window_size
lowercase__ = mlp_ratio
lowercase__ = qkv_bias
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = drop_path_rate
lowercase__ = hidden_act
lowercase__ = use_absolute_embeddings
lowercase__ = layer_norm_eps
lowercase__ = initializer_range
lowercase__ = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase__ = int(embed_dim * 2 ** (len(_UpperCAmelCase ) - 1) )
lowercase__ = (0, 0, 0, 0)
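# Quick sketch of the derived attribute above: with the defaults
# (embed_dim=96, depths=[2, 2, 6, 2]) the final hidden size works out to
# int(96 * 2 ** 3) == 768.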
| 668 |
"""simple docstring"""
import os
import sys
lowerCAmelCase_: Any = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowerCAmelCase_: Union[str, Any] = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoConfig.from_pretrained(*A , **A )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(*A , **A )
@add_start_docstrings(AutoModel.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModel.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*A , **A )
| 668 | 1 |
"""simple docstring"""
lowerCAmelCase_: Optional[Any] = 8.314_462 # Unit - J mol-1 K-1
def __a ( A , A , A ):
'''simple docstring'''
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def __a ( A , A , A ):
'''simple docstring'''
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
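# Worked check (a sketch): 1 mol at 300 K in 1 m^3 gives
# P = nRT / V = 8.314_462 * 300 / 1 ≈ 2494.34 Pa, and feeding that pressure
# back in recovers V = nRT / P = 1 m^3.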
| 668 |
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class a__ ( unittest.TestCase ):
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(_UpperCAmelCase ):
lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(_UpperCAmelCase ):
lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase )
lowercase__ = FlaxBertModel.from_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX )
@jax.jit
def eval(**_UpperCAmelCase ):
return model(**_UpperCAmelCase )
eval(**_UpperCAmelCase ).block_until_ready()
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase )
lowercase__ = FlaxRobertaModel.from_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX )
@jax.jit
def eval(**_UpperCAmelCase ):
return model(**_UpperCAmelCase )
eval(**_UpperCAmelCase ).block_until_ready()
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase, "bert-base is not a local folder and is not a valid model identifier" ):
lowercase__ = FlaxAutoModel.from_pretrained("bert-base" )
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase, revision="aaaaaa" )
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase, "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack", ):
lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase, "Use `from_pt=True` to load this model" ):
lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
| 668 | 1 |
"""simple docstring"""
from datetime import datetime
import requests
def __a ( A ):
'''simple docstring'''
lowercase__ = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
lowercase__ = requests.get(base_url + url ).json()[0]["urls"][0]["src"]
return requests.get(A ).content
if __name__ == "__main__":
lowerCAmelCase_: int = input("Enter Video/IGTV url: ").strip()
lowerCAmelCase_: Tuple = F'{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(F'Done. Video saved to disk as {file_name}.')
| 668 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_: str = logging.get_logger(__name__)
lowerCAmelCase_: List[Any] = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class a__ ( _a ):
snake_case_ = "data2vec-vision"
def __init__( self, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.0, _UpperCAmelCase=0.0, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=224, _UpperCAmelCase=16, _UpperCAmelCase=3, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=True, _UpperCAmelCase=[3, 5, 7, 11], _UpperCAmelCase=[1, 2, 3, 6], _UpperCAmelCase=True, _UpperCAmelCase=0.4, _UpperCAmelCase=256, _UpperCAmelCase=1, _UpperCAmelCase=False, _UpperCAmelCase=255, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = use_mask_token
lowercase__ = use_absolute_position_embeddings
lowercase__ = use_relative_position_bias
lowercase__ = use_shared_relative_position_bias
lowercase__ = layer_scale_init_value
lowercase__ = drop_path_rate
lowercase__ = use_mean_pooling
# decode head attributes (semantic segmentation)
lowercase__ = out_indices
lowercase__ = pool_scales
# auxiliary head attributes (semantic segmentation)
lowercase__ = use_auxiliary_head
lowercase__ = auxiliary_loss_weight
lowercase__ = auxiliary_channels
lowercase__ = auxiliary_num_convs
lowercase__ = auxiliary_concat_input
lowercase__ = semantic_loss_ignore_index
class a__ ( _a ):
snake_case_ = version.parse("1.11" )
@property
def snake_case__ ( self ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def snake_case__ ( self ):
'''simple docstring'''
return 1E-4
| 668 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class a__ ( _a ):
snake_case_ = "microsoft/speecht5_tts"
snake_case_ = (
"This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
"text to read (in English) and returns a waveform object containing the sound."
)
snake_case_ = "text_reader"
snake_case_ = SpeechTaProcessor
snake_case_ = SpeechTaForTextToSpeech
snake_case_ = SpeechTaHifiGan
snake_case_ = ["text"]
snake_case_ = ["audio"]
def snake_case__ ( self ):
'''simple docstring'''
if self.post_processor is None:
lowercase__ = "microsoft/speecht5_hifigan"
super().setup()
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase=None ):
'''simple docstring'''
lowercase__ = self.pre_processor(text=_UpperCAmelCase, return_tensors="pt", truncation=_UpperCAmelCase )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("Datasets needs to be installed if not passing speaker embeddings." )
lowercase__ = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation" )
lowercase__ = torch.tensor(embeddings_dataset[7305]["xvector"] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
with torch.no_grad():
return self.model.generate_speech(**_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
with torch.no_grad():
return self.post_processor(_UpperCAmelCase ).cpu().detach()
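# Hedged usage sketch (downloads model weights and, via `datasets`, the
# default speaker embedding; `a__` is the tool class defined above):
# tool = a__()
# tool.setup()
# waveform = tool("Reading this sentence out loud.")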
| 668 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: List[Any] = logging.get_logger(__name__)
lowerCAmelCase_: int = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class a__ ( _a ):
snake_case_ = "markuplm"
def __init__( self, _UpperCAmelCase=3_0522, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=2, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=0, _UpperCAmelCase=0, _UpperCAmelCase=2, _UpperCAmelCase=256, _UpperCAmelCase=1024, _UpperCAmelCase=216, _UpperCAmelCase=1001, _UpperCAmelCase=32, _UpperCAmelCase=50, _UpperCAmelCase="absolute", _UpperCAmelCase=True, _UpperCAmelCase=None, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = use_cache
lowercase__ = classifier_dropout
# additional properties
lowercase__ = max_depth
lowercase__ = max_xpath_tag_unit_embeddings
lowercase__ = max_xpath_subs_unit_embeddings
lowercase__ = tag_pad_id
lowercase__ = subs_pad_id
lowercase__ = xpath_unit_hidden_size
| 668 | 1 |
"""simple docstring"""
from collections import namedtuple
lowerCAmelCase_: List[str] = namedtuple("from_to", "from_ to")
lowerCAmelCase_: int = {
"cubicmeter": from_to(1, 1),
"litre": from_to(0.001, 1_0_0_0),
"kilolitre": from_to(1, 1),
"gallon": from_to(0.00_454, 264.172),
"cubicyard": from_to(0.76_455, 1.30_795),
"cubicfoot": from_to(0.028, 35.3_147),
"cup": from_to(0.000_236_588, 4_226.75),
}
def __a ( A , A , A ):
'''simple docstring'''
if from_type not in METRIC_CONVERSION:
raise ValueError(
f'''Invalid \'from_type\' value: {from_type!r}. Supported values are:\n'''
+ ", ".join(A ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
f'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
+ ", ".join(A ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
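# Worked check (a sketch): 2 litres -> gallons goes through cubic metres:
# 2 * 0.001 = 0.002 m^3, then 0.002 * 264.172 ≈ 0.528 US gallons.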
| 668 |
"""simple docstring"""
lowerCAmelCase_: Union[str, Any] = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
lowerCAmelCase_: List[str] = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
lowerCAmelCase_: List[str] = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
lowerCAmelCase_: Dict = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
lowerCAmelCase_: Optional[int] = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
lowerCAmelCase_: Tuple = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
lowerCAmelCase_: str = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
lowerCAmelCase_: int = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 668 | 1 |
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class a__ :
def __init__( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = str(id_ )
lowercase__ = None
lowercase__ = None
lowercase__ = []
lowercase__ = {} # {vertex:distance}
def __lt__( self, _UpperCAmelCase ):
'''simple docstring'''
return self.key < other.key
def __repr__( self ):
'''simple docstring'''
return self.id
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
self.neighbors.append(_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = weight
def __a ( A , A , A , A ):
'''simple docstring'''
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , A )
graph[b - 1].add_edge(graph[a - 1] , A )
def __a ( A , A ):
'''simple docstring'''
lowercase__ = []
for u in graph:
lowercase__ = math.inf
lowercase__ = None
lowercase__ = 0
lowercase__ = graph[:]
while q:
lowercase__ = min(A )
q.remove(A )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
lowercase__ = u
lowercase__ = u.edges[v.id]
for i in range(1 , len(A ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def __a ( A , A ):
'''simple docstring'''
for u in graph:
lowercase__ = math.inf
lowercase__ = None
lowercase__ = 0
lowercase__ = list(A )
hq.heapify(A )
while h:
lowercase__ = hq.heappop(A )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
lowercase__ = u
lowercase__ = u.edges[v.id]
hq.heapify(A )
for i in range(1 , len(A ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def __a ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 668 |
"""simple docstring"""
from __future__ import annotations
def __a ( A , A ):
'''simple docstring'''
if partitions <= 0:
raise ValueError("partitions must be a positive number!" )
if partitions > number_of_bytes:
raise ValueError("partitions can not > number_of_bytes!" )
lowercase__ = number_of_bytes // partitions
lowercase__ = []
for i in range(A ):
lowercase__ = i * bytes_per_partition + 1
lowercase__ = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
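# Worked example (a sketch): allocating 16 bytes across 4 partitions yields
# ["1-4", "5-8", "9-12", "13-16"]; with 10 bytes and 3 partitions the last
# partition absorbs the remainder: ["1-3", "4-6", "7-10"].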
| 668 | 1 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __a ( A , A ):
'''simple docstring'''
assert isinstance(A , A )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __a ( A , A , A ):
'''simple docstring'''
lowercase__ = tmp_path / "cache"
lowercase__ = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase__ = ParquetDatasetReader(A , cache_dir=A , keep_in_memory=A ).read()
_check_parquet_dataset(A , A )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def __a ( A , A , A ):
'''simple docstring'''
lowercase__ = tmp_path / "cache"
lowercase__ = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase__ = features.copy() if features else default_expected_features
lowercase__ = (
Features({feature: Value(A ) for feature, dtype in features.items()} ) if features is not None else None
)
lowercase__ = ParquetDatasetReader(A , features=A , cache_dir=A ).read()
_check_parquet_dataset(A , A )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __a ( A , A , A ):
'''simple docstring'''
lowercase__ = tmp_path / "cache"
lowercase__ = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase__ = ParquetDatasetReader(A , cache_dir=A , split=A ).read()
_check_parquet_dataset(A , A )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def __a ( A , A , A ):
'''simple docstring'''
if issubclass(A , A ):
lowercase__ = parquet_path
elif issubclass(A , A ):
lowercase__ = [parquet_path]
lowercase__ = tmp_path / "cache"
lowercase__ = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase__ = ParquetDatasetReader(A , cache_dir=A ).read()
_check_parquet_dataset(A , A )
def __a ( A , A , A=("train",) ):
'''simple docstring'''
assert isinstance(A , A )
for split in splits:
lowercase__ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __a ( A , A , A ):
'''simple docstring'''
lowercase__ = tmp_path / "cache"
lowercase__ = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase__ = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=A , keep_in_memory=A ).read()
_check_parquet_datasetdict(A , A )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def __a ( A , A , A ):
'''simple docstring'''
lowercase__ = tmp_path / "cache"
lowercase__ = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase__ = features.copy() if features else default_expected_features
lowercase__ = (
Features({feature: Value(A ) for feature, dtype in features.items()} ) if features is not None else None
)
lowercase__ = ParquetDatasetReader({"train": parquet_path} , features=A , cache_dir=A ).read()
_check_parquet_datasetdict(A , A )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __a ( A , A , A ):
'''simple docstring'''
if split:
lowercase__ = {split: parquet_path}
else:
lowercase__ = "train"
lowercase__ = {"train": parquet_path, "test": parquet_path}
lowercase__ = tmp_path / "cache"
lowercase__ = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase__ = ParquetDatasetReader(A , cache_dir=A ).read()
_check_parquet_datasetdict(A , A , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __a ( A , A ):
'''simple docstring'''
lowercase__ = ParquetDatasetWriter(A , tmp_path / "foo.parquet" )
assert writer.write() > 0
lowercase__ = pq.ParquetFile(tmp_path / "foo.parquet" )
lowercase__ = pf.read()
assert dataset.data.table == output_table
def __a ( A , A ):
'''simple docstring'''
lowercase__ = str(shared_datadir / "test_image_rgb.jpg" )
lowercase__ = {"image": [image_path]}
lowercase__ = Features({"image": Image()} )
lowercase__ = Dataset.from_dict(A , features=A )
lowercase__ = ParquetDatasetWriter(A , tmp_path / "foo.parquet" )
assert writer.write() > 0
lowercase__ = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
lowercase__ = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=A ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def __a ( A , A ):
'''simple docstring'''
assert get_writer_batch_size(A ) == expected
| 668 |
"""simple docstring"""
from collections import deque
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = process_name # process name
lowercase__ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
lowercase__ = arrival_time
lowercase__ = burst_time # remaining burst time
lowercase__ = 0 # total time of the process wait in ready queue
lowercase__ = 0 # time from arrival time to completion time
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = number_of_queues
# time slices of the queues to which the round-robin algorithm is applied
lowercase__ = time_slices
# unfinished process is in this ready_queue
lowercase__ = queue
# current time
lowercase__ = current_time
# finished process is in this sequence queue
lowercase__ = deque()
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
return [q.burst_time for q in queue]
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = deque() # sequence deque of finished process
while len(_UpperCAmelCase ) != 0:
lowercase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time = cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_UpperCAmelCase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
lowercase__ = 0
# set the process's turnaround time because it is finished
lowercase__ = self.current_time - cp.arrival_time
# set the completion time
lowercase__ = self.current_time
# add the process to queue that has finished queue
finished.append(_UpperCAmelCase )
self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = deque() # sequence deque of terminated process
# run just one cycle; unfinished processes go back to the ready queue
for _ in range(len(_UpperCAmelCase ) ):
lowercase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time = cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_UpperCAmelCase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
lowercase__ = self.current_time
# put the process at the back of the queue since it is not finished
ready_queue.append(_UpperCAmelCase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
lowercase__ = 0
# set the finish time
lowercase__ = self.current_time
# update the process' turnaround time because it is finished
lowercase__ = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_UpperCAmelCase )
self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def snake_case__ ( self ):
'''simple docstring'''
for i in range(self.number_of_queues - 1 ):
lowercase__ , lowercase__ = self.round_robin(
self.ready_queue, self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print the total waiting times of the processes (P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print the completion times of the processes (P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print the total turnaround times of the processes (P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print the sequence of finished processes
    print(f"sequence of finished processes:\t{mlfq.calculate_sequence_of_finish_queue()}")
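# Editor's note: a quick hand-checked sanity test of the scheduler above (an
# illustrative addition reusing the Process and MLFQ classes defined in this
# module). With time slices [17, 25] and burst times 53/17/68/24, queue 1
# (slice 17) finishes P2, queue 2 (slice 25) finishes P4, and the final FCFS
# queue drains P1 and then P3.
if __name__ == "__main__":
    _q = deque(
        [Process("P1", 0, 53), Process("P2", 0, 17), Process("P3", 0, 68), Process("P4", 0, 24)]
    )
    _demo = MLFQ(3, [17, 25], _q, 0)
    _demo.multi_level_feedback_queue()
    assert _demo.calculate_sequence_of_finish_queue() == ["P2", "P4", "P1", "P3"]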
| 668 | 1 |
"""simple docstring"""
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
"\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n"
)
CORRECT_DICT = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
README_CORRECT = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
README_CORRECT_FOUR_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
CORRECT_DICT_FOUR_LEVEL = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
README_EMPTY_YAML = "\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_EMPTY_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
README_NO_YAML = "\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_NO_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
README_INCORRECT_YAML = "\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_INCORRECT_YAML = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
README_MISSING_TEXT = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MISSING_TEXT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
README_NONE_SUBSECTION = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
EXPECTED_ERROR_README_NONE_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
README_MISSING_SUBSECTION = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MISSING_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
README_MISSING_CONTENT = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
EXPECTED_ERROR_README_MISSING_CONTENT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
README_MISSING_FIRST_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
README_MULTIPLE_WRONG_FIRST_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
README_WRONG_FIRST_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
README_EMPTY = ""
EXPECTED_ERROR_README_EMPTY = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
README_MULTIPLE_SAME_HEADING_1 = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_validation_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
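# Editor's note: an illustrative, self-contained usage sketch (not part of the
# original test suite) of the API these tests exercise; it parses a correct card
# against the yaml structure defined above and validates it.
if __name__ == "__main__":
    readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
    readme.validate()  # raises ValueError on badly structured cards
    print(readme.to_dict()["name"])  # -> "root"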
| 668 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByTaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def ta_base_tokenizer(self):
        return ByTaTokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByTaTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
def snake_case__ ( self ):
'''simple docstring'''
        tokenizer = self.ta_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = "Unicode €."
lowercase__ = tokenizer(_UpperCAmelCase )
lowercase__ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["input_ids"], _UpperCAmelCase )
# decoding
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, "Unicode €.</s>" )
lowercase__ = tokenizer("e è é ê ë" )
lowercase__ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["input_ids"], _UpperCAmelCase )
# decoding
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, "e è é ê ë</s>" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ), "e è é ê ë</s>" )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
if FRAMEWORK != "jax":
lowercase__ = list(batch.input_ids.numpy()[0] )
else:
lowercase__ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertEqual((2, 37), batch.input_ids.shape )
self.assertEqual((2, 37), batch.attention_mask.shape )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids", _UpperCAmelCase )
self.assertIn("attention_mask", _UpperCAmelCase )
self.assertNotIn("decoder_input_ids", _UpperCAmelCase )
self.assertNotIn("decoder_attention_mask", _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = [
"Summary of the text.",
"Another summary.",
]
lowercase__ = tokenizer(
text_target=_UpperCAmelCase, max_length=32, padding="max_length", truncation=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
self.assertEqual(32, targets["input_ids"].shape[1] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization. </s>"]
lowercase__ = ["Summary of the text. </s>"]
# fmt: off
lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
lowercase__ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
lowercase__ = tokenizer(_UpperCAmelCase, text_target=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, batch["input_ids"][0] )
self.assertEqual(_UpperCAmelCase, batch["labels"][0] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
lowercase__ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
lowercase__ = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(_UpperCAmelCase )
lowercase__ = [F'''<extra_id_{i}>''' for i in range(125 )]
lowercase__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
lowercase__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowercase__ = tokenizer_class.from_pretrained(
_UpperCAmelCase, )
self.assertIn(
"an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowercase__ = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=_UpperCAmelCase )]
lowercase__ = tokenizer_class.from_pretrained(
_UpperCAmelCase, additional_special_tokens=_UpperCAmelCase, )
self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ), )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer_class.from_pretrained(_UpperCAmelCase )
self.assertTrue(tokenizer.decode([255] ) == "" )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(fast=_UpperCAmelCase, do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
lowercase__ = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
lowercase__ = 0
lowercase__ = tokenizer.convert_ids_to_tokens(
_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase )
for attr in attributes_list:
setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase )
setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase )
setattr(_UpperCAmelCase, "additional_special_tokens_ids", [] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [] )
setattr(_UpperCAmelCase, "additional_special_tokens_ids", [token_id_to_test_setters] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [token_to_test_setters] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [token_id_to_test_setters] )
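# Editor's note: an illustrative sketch (added by the editor, independent of the
# test class above) of the mapping ByT5 uses: each UTF-8 byte becomes token id
# byte_value + 3, reserving ids 0-2 for pad/eos/unk, and </s> is id 1 - which is
# why "U" (byte 85) encodes to 88 in the tests above.
def _byta_like_encode(text: str) -> list:
    return [b + 3 for b in text.encode("utf-8")] + [1]  # trailing 1 is </s>


assert _byta_like_encode("Unicode €.") == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]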
| 668 | 1 |
"""simple docstring"""
def binomial_coefficient(n, k):
    """Compute C(n, k) iteratively."""
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count):
    """Return the number of possible binary search trees with `node_count` nodes."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n):
    """Return n!, raising ValueError for negative input."""
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count):
    """Return the number of possible (labeled) binary trees with `node_count` nodes."""
    return catalan_number(node_count) * factorial(node_count)
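# Editor's note: quick illustrative checks against known closed-form values
# (an addition by the editor; Catalan(5) = 42 and 42 * 5! = 5040):
assert binomial_coefficient(4, 2) == 6
assert catalan_number(5) == 42
assert binary_tree_count(5) == 5040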
if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("We need some nodes to work with.")
print(
F'Given {node_count} nodes, there are {binary_tree_count(node_count)} '
F'binary trees and {catalan_number(node_count)} binary search trees.'
)
| 668 |
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class a__ ( unittest.TestCase ):
snake_case_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs, [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ], )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10})
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4)
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}], )
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ], top_k=2, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ], )

    @require_tf
    def test_small_model_tf(self):
        pass
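# Editor's note: a minimal end-to-end usage sketch of the pipeline exercised
# above (an illustrative addition, guarded because it downloads a tiny model and
# a sample clip from the Hub and needs the decord backend):
if __name__ == "__main__":
    clf = pipeline(
        "video-classification", model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
    )
    video = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
    print(clf(video, top_k=2))  # e.g. [{"score": ..., "label": "LABEL_0"}, ...]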
| 668 | 1 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
lowerCAmelCase_: List[Any] = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data, to be fed into the model."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError
    @staticmethod
    def convert_examples_to_features(
        examples,
        label_list,
        max_seq_length,
        tokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ):
        """Convert the examples into padded, BERT-style input features."""
        label_map = {label: i for i, label in enumerate(label_list)}
        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)
                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
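            # Editor's note (illustrative): with max_seq_length 6 and the single
            # sentence ["the", "dog"], the BERT-style encoding produced below looks like
            #   tokens:     [CLS]  the    dog    [SEP]  <pad>  <pad>
            #   label_ids:  -100   l_the  l_dog  -100   -100   -100
            # i.e. the special tokens and the padding all carry pad_token_label_id
            # (-100 by default), so they are ignored by the loss.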
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        # Use cross entropy ignore_index as the padding label id so that only
        # real label ids contribute to the loss later.
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)), )
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen, ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64), (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ), )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen, ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64), (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ), )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
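# Editor's note: a standalone illustration (an editor's addition, not used by the
# classes above) of the left/right padding logic in convert_examples_to_features,
# assuming pad_token=0:
def _pad_to_length(input_ids, max_len, pad_on_left=False, pad_token=0):
    padding = [pad_token] * (max_len - len(input_ids))
    return padding + input_ids if pad_on_left else input_ids + padding


assert _pad_to_length([101, 7, 102], 5) == [101, 7, 102, 0, 0]
assert _pad_to_length([101, 7, 102], 5, pad_on_left=True) == [0, 0, 101, 7, 102]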
| 668 |
"""simple docstring"""
import itertools
import math
def is_prime(number: int) -> bool:
    """Trial division using the fact that every prime above 3 has the form 6k +/- 1."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes in ascending order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Return the `nth` prime number (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))
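# Editor's note: small illustrative checks added by the editor (the first six
# primes are 2, 3, 5, 7, 11, 13):
assert solution(1) == 2
assert solution(6) == 13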
if __name__ == "__main__":
print(F'{solution() = }')
| 668 | 1 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call.)")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
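# Editor's note: a minimal standalone sketch (illustrative names, added by the
# editor) of the swap-and-restore pattern used by `as_target_processor` above:
# the context manager temporarily redirects `current_processor` to the tokenizer
# and puts the feature extractor back on exit.
if __name__ == "__main__":
    from contextlib import contextmanager as _cm

    class _SwapDemo:
        def __init__(self):
            self.current_processor = "feature_extractor"

        @_cm
        def as_target(self):
            self.current_processor = "tokenizer"
            yield
            self.current_processor = "feature_extractor"

    demo = _SwapDemo()
    with demo.as_target():
        assert demo.current_processor == "tokenizer"
    assert demo.current_processor == "feature_extractor"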
| 668 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths,
        split=None,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        num_proc=None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs, )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
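# Editor's note: a runnable usage sketch (an editor's addition; it goes through
# the public `datasets` API that this reader backs): load_dataset("text", ...)
# yields one {"text": line} record per line of the input file.
if __name__ == "__main__":
    import os
    import tempfile

    from datasets import load_dataset

    with tempfile.TemporaryDirectory() as tmp:
        corpus = os.path.join(tmp, "corpus.txt")
        with open(corpus, "w") as f:
            f.write("hello\nworld\n")
        ds = load_dataset("text", data_files={"train": corpus})["train"]
        assert ds[0] == {"text": "hello"}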
| 668 | 1 |
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class FFT:
    """Multiply two polynomials via the fast Fourier transform (Cooley-Tukey)."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as coefficient lists, lowest order term first
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root of unity used for the Fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        """Discrete Fourier transform of polynomial A or B, computed iteratively."""
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        # Butterfly passes: halve the number of columns at every step
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        """Multiply A and B pointwise in the frequency domain, then apply the inverse DFT."""
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverse_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner case
        if len(inverse_c[0]) <= 1:
            return inverse_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverse_c[i][j]
                            + inverse_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2)
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverse_c[i][j]
                            - inverse_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root))
                current_root *= root
            # Update
            inverse_c = new_inverse_c
            next_ncol *= 2

        # Unpack
        inverse_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverse_c]
        # Remove trailing 0's
        while inverse_c[-1] == 0:
            inverse_c.pop()
        return inverse_c

    def __str__(self):
        """Show the polynomials A, B and their product A*B."""
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"


# Unit tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
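# Editor's note: a small worked example added by the editor (illustrative; it
# relies on the FFT class above): (1 + 2x) * (1 + 3x) = 1 + 5x + 6x^2, so the
# product coefficients should come out as [1, 5, 6].
if __name__ == "__main__":
    fft = FFT(poly_a=[1, 2], poly_b=[1, 3])
    assert [int(c.real) for c in fft.product] == [1, 5, 6]
    print(fft)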
| 668 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels', which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length, or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
                overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"]))  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--performance_lower_bound", type=float, default=None, help="Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.", )
    parser.add_argument(
        "--num_epochs", type=int, default=3, help="Number of train epochs.", )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
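# Editor's note: a standalone sketch (illustrative names, added by the editor) of
# the de-duplication applied after `accelerator.gather` in the eval loop above:
# distributed gathers pad the last batch, so the tail beyond the true dataset
# size must be dropped before computing the metric.
def _drop_gather_duplicates(gathered, samples_seen, dataset_size, is_last_batch):
    if is_last_batch:
        return gathered[: dataset_size - samples_seen]
    return gathered


assert _drop_gather_duplicates([1, 2, 3, 4], samples_seen=6, dataset_size=8, is_last_batch=True) == [1, 2]
assert _drop_gather_duplicates([1, 2, 3, 4], samples_seen=4, dataset_size=8, is_last_batch=False) == [1, 2, 3, 4]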
| 668 | 1 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class a__ :
@staticmethod
def snake_case__ ( *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
pass
def hashimage(image) -> str:
    """Return a stable 10-character fingerprint of a PIL image's raw bytes."""
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask) -> Dict:
    """Summarize a mask as a short hash plus its shape."""
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class a__ ( unittest.TestCase ):
snake_case_ = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
snake_case_ = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = MaskGenerationPipeline(model=_UpperCAmelCase, image_processor=_UpperCAmelCase )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
def snake_case__ ( self ):
'''simple docstring'''
pass
@slow
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = pipeline("mask-generation", model="facebook/sam-vit-huge" )
lowercase__ = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256 )
# Shortening by hashing
lowercase__ = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(_UpperCAmelCase ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(_UpperCAmelCase, decimals=4 ), [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0_444},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.021},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0_167},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0_132},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0_053},
{"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9_967},
{"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.993},
{"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9_909},
{"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9_879},
{"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9_834},
{"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9_716},
{"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9_612},
{"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9_599},
{"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9_552},
{"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9_532},
{"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9_516},
{"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9_499},
{"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9_483},
{"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9_464},
{"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.943},
{"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.943},
{"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9_408},
{"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9_335},
{"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9_326},
{"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9_262},
{"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8_999},
{"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8_986},
{"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8_984},
{"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8_873},
{"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8_871}
], )
# fmt: on
@require_torch
@slow
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "facebook/sam-vit-huge"
lowercase__ = pipeline("mask-generation", model=_UpperCAmelCase )
lowercase__ = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256 )
# Shortening by hashing
lowercase__ = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(_UpperCAmelCase ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(_UpperCAmelCase, decimals=4 ), [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0_444},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_210},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0_167},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0_132},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0_053},
], )
| 668 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a__ ( _a ):
snake_case_ = (IPNDMScheduler,)
snake_case_ = (("num_inference_steps", 50),)
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = {"num_train_timesteps": 1000}
config.update(**_UpperCAmelCase )
return config
def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config(**_UpperCAmelCase )
lowercase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
if time_step is None:
lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase )
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
if time_step is None:
lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**_UpperCAmelCase )
lowercase__ = scheduler_class(**_UpperCAmelCase )
lowercase__ = 10
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample
return sample
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(_UpperCAmelCase, "set_timesteps" ):
scheduler.set_timesteps(_UpperCAmelCase )
elif num_inference_steps is not None and not hasattr(_UpperCAmelCase, "set_timesteps" ):
lowercase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.timesteps[5]
lowercase__ = scheduler.timesteps[6]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def snake_case__ ( self ):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase, time_step=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ):
self.check_over_forward(num_inference_steps=_UpperCAmelCase, time_step=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.full_loop()
lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) )
        assert abs(result_mean.item() - 2_540_529 ) < 10
| 668 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
lowerCAmelCase_: Dict = {
"susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
"susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class a__ ( _a ):
snake_case_ = "ernie_m"
snake_case_ = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
    def __init__( self, _UpperCAmelCase = 250_002, _UpperCAmelCase = 768, _UpperCAmelCase = 12, _UpperCAmelCase = 12, _UpperCAmelCase = 3072, _UpperCAmelCase = "gelu", _UpperCAmelCase = 0.1, _UpperCAmelCase = 0.1, _UpperCAmelCase = 514, _UpperCAmelCase = 0.02, _UpperCAmelCase = 1, _UpperCAmelCase = 1E-05, _UpperCAmelCase=None, _UpperCAmelCase=False, _UpperCAmelCase=0.0, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase, **_UpperCAmelCase )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = classifier_dropout
lowercase__ = is_decoder
lowercase__ = act_dropout
| 668 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( _a , unittest.TestCase ):
snake_case_ = MgpstrTokenizer
snake_case_ = False
snake_case_ = {}
snake_case_ = False
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
# fmt: off
lowercase__ = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
lowercase__ = dict(zip(_UpperCAmelCase, range(len(_UpperCAmelCase ) ) ) )
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + "\n" )
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = "tester"
lowercase__ = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
lowercase__ = tokenizer.encode([special_token], add_special_tokens=_UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ), 1 )
lowercase__ = tokenizer.decode(_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase )
self.assertTrue(special_token not in decoded )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ , lowercase__ = self.get_input_output_texts(_UpperCAmelCase )
lowercase__ = tokenizer.tokenize(_UpperCAmelCase )
lowercase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertNotEqual(len(_UpperCAmelCase ), 0 )
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
self.assertEqual(text_a.replace(" ", "" ), _UpperCAmelCase )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def snake_case__ ( self ):
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def snake_case__ ( self ):
'''simple docstring'''
pass
| 668 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase_: Any = logging.get_logger(__name__)
lowerCAmelCase_: Optional[int] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
lowerCAmelCase_: List[str] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def __a ( A , A , A , A , A ):
'''simple docstring'''
for attribute in key.split("." ):
lowercase__ = getattr(A , A )
if weight_type is not None:
lowercase__ = getattr(A , A ).shape
else:
lowercase__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowercase__ = value
elif weight_type == "weight_g":
lowercase__ = value
elif weight_type == "weight_v":
lowercase__ = value
elif weight_type == "bias":
lowercase__ = value
elif weight_type == "running_mean":
lowercase__ = value
elif weight_type == "running_var":
lowercase__ = value
elif weight_type == "num_batches_tracked":
lowercase__ = value
elif weight_type == "inv_freq":
lowercase__ = value
else:
lowercase__ = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
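# Editor's sketch (hypothetical model and key, not from the conversion script):
# the helper above resolves a dotted checkpoint key with repeated getattr calls
# and then overwrites the leaf parameter's `.data`. The attribute walk alone:
def _resolve_attribute_sketch(root, dotted_key):
    node = root
    for attribute in dotted_key.split("." ):
        node = getattr(node, attribute )
    return node
# e.g. _resolve_attribute_sketch(hf_model, "encoder.layers.0.self_attn.linear_q.weight")
# returns the tensor whose `.data` the helper assigns in place.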
def __a ( A , A , A ):
'''simple docstring'''
lowercase__ = []
lowercase__ = fairseq_model.state_dict()
lowercase__ = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ = False
if "conv_layers" in name:
load_conv_layer(
A , A , A , A , hf_model.config.feat_extract_norm == "group" , )
lowercase__ = True
else:
for key, mapped_key in MAPPING.items():
lowercase__ = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
lowercase__ = True
if "*" in mapped_key:
lowercase__ = name.split(A )[0].split("." )[-2]
lowercase__ = mapped_key.replace("*" , A )
if "pos_bias_u" in name:
lowercase__ = None
elif "pos_bias_v" in name:
lowercase__ = None
elif "weight_g" in name:
lowercase__ = "weight_g"
elif "weight_v" in name:
lowercase__ = "weight_v"
elif "bias" in name:
lowercase__ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ = "weight"
elif "running_mean" in name:
lowercase__ = "running_mean"
elif "inv_freq" in name:
lowercase__ = "inv_freq"
elif "running_var" in name:
lowercase__ = "running_var"
elif "num_batches_tracked" in name:
lowercase__ = "num_batches_tracked"
else:
lowercase__ = None
set_recursively(A , A , A , A , A )
continue
if not is_used:
unused_weights.append(A )
logger.warning(f'''Unused weights: {unused_weights}''' )
def __a ( A , A , A , A , A ):
'''simple docstring'''
lowercase__ = full_name.split("conv_layers." )[-1]
lowercase__ = name.split("." )
lowercase__ = int(items[0] )
lowercase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
lowercase__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
lowercase__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
lowercase__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
lowercase__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(A )
@torch.no_grad()
def __a ( A , A , A=None , A=None , A=True ):
'''simple docstring'''
if config_path is not None:
lowercase__ = WavaVecaConformerConfig.from_pretrained(A , hidden_act="swish" )
else:
lowercase__ = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
lowercase__ = "rotary"
if is_finetuned:
if dict_path:
lowercase__ = Dictionary.load(A )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowercase__ = target_dict.pad_index
lowercase__ = target_dict.bos_index
lowercase__ = target_dict.eos_index
lowercase__ = len(target_dict.symbols )
lowercase__ = os.path.join(A , "vocab.json" )
if not os.path.isdir(A ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(A ) )
return
os.makedirs(A , exist_ok=A )
lowercase__ = target_dict.indices
# fairseq has the <pad> and <s> switched
lowercase__ = 0
lowercase__ = 1
with open(A , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(A , A )
lowercase__ = WavaVecaCTCTokenizer(
A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=A , )
lowercase__ = True if config.feat_extract_norm == "layer" else False
lowercase__ = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=A , return_attention_mask=A , )
lowercase__ = WavaVecaProcessor(feature_extractor=A , tokenizer=A )
processor.save_pretrained(A )
lowercase__ = WavaVecaConformerForCTC(A )
else:
lowercase__ = WavaVecaConformerForPreTraining(A )
if is_finetuned:
lowercase__ , lowercase__ , lowercase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
lowercase__ = argparse.Namespace(task="audio_pretraining" )
lowercase__ = fairseq.tasks.setup_task(A )
lowercase__ , lowercase__ , lowercase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=A )
lowercase__ = model[0].eval()
recursively_load_weights(A , A , not is_finetuned )
hf_wavavec.save_pretrained(A )
if __name__ == "__main__":
lowerCAmelCase_: int = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
lowerCAmelCase_: str = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 668 |
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 668 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase_: int = logging.get_logger(__name__)
lowerCAmelCase_: Tuple = {
"shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class a__ ( _a , _a ):
snake_case_ = "nat"
snake_case_ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self, _UpperCAmelCase=4, _UpperCAmelCase=3, _UpperCAmelCase=64, _UpperCAmelCase=[3, 4, 6, 5], _UpperCAmelCase=[2, 4, 8, 16], _UpperCAmelCase=7, _UpperCAmelCase=3.0, _UpperCAmelCase=True, _UpperCAmelCase=0.0, _UpperCAmelCase=0.0, _UpperCAmelCase=0.1, _UpperCAmelCase="gelu", _UpperCAmelCase=0.02, _UpperCAmelCase=1E-5, _UpperCAmelCase=0.0, _UpperCAmelCase=None, _UpperCAmelCase=None, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = embed_dim
lowercase__ = depths
lowercase__ = len(_UpperCAmelCase )
lowercase__ = num_heads
lowercase__ = kernel_size
lowercase__ = mlp_ratio
lowercase__ = qkv_bias
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = drop_path_rate
lowercase__ = hidden_act
lowercase__ = layer_norm_eps
lowercase__ = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase__ = int(embed_dim * 2 ** (len(_UpperCAmelCase ) - 1) )
lowercase__ = layer_scale_init_value
lowercase__ = ["stem"] + [F'''stage{idx}''' for idx in range(1, len(_UpperCAmelCase ) + 1 )]
lowercase__ , lowercase__ = get_aligned_output_features_output_indices(
out_features=_UpperCAmelCase, out_indices=_UpperCAmelCase, stage_names=self.stage_names )
| 668 |
"""simple docstring"""
from typing import Any
import numpy as np
def __a ( A ):
'''simple docstring'''
return np.array_equal(A , matrix.conjugate().T )
def __a ( A , A ):
'''simple docstring'''
lowercase__ = v.conjugate().T
lowercase__ = v_star.dot(A )
assert isinstance(A , np.ndarray )
return (v_star_dot.dot(A )) / (v_star.dot(A ))
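# Editor's sketch: for a Hermitian matrix the Rayleigh quotient is always real
# and bounded by the extreme eigenvalues; a quick check with values not taken
# from the tests below (the quotient is called by its original name, as tests() does):
def _rayleigh_bounds_sketch():
    a = np.array([[1, 2], [2, 1]] )  # real symmetric, hence Hermitian
    v = np.array([[1.0], [0.0]] )
    q = rayleigh_quotient(a, v ).real.item()  # equals 1.0 for this choice of v
    eigenvalues = np.linalg.eigvalsh(a )  # [-1.0, 3.0] for this matrix
    assert eigenvalues.min() <= q <= eigenvalues.max()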
def __a ( ):
'''simple docstring'''
lowercase__ = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
lowercase__ = np.array([[1], [2], [3]] )
assert is_hermitian(A ), f'''{a} is not hermitian.'''
print(rayleigh_quotient(A , A ) )
lowercase__ = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(A ), f'''{a} is not hermitian.'''
assert rayleigh_quotient(A , A ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 668 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase=13, _UpperCAmelCase=[30, 30], _UpperCAmelCase=2, _UpperCAmelCase=3, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase=32, _UpperCAmelCase=5, _UpperCAmelCase=4, _UpperCAmelCase=37, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=10, _UpperCAmelCase=0.02, _UpperCAmelCase=3, _UpperCAmelCase=None, _UpperCAmelCase=8, _UpperCAmelCase=10, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = scope
lowercase__ = n_targets
lowercase__ = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowercase__ = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowercase__ = num_patches + 1 + self.num_detection_tokens
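        # e.g. with the defaults above: (30 // 2) * (30 // 2) = 225 patches,
        # so expected_seq_len = 225 + 1 + 10 = 236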
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowercase__ = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowercase__ = []
for i in range(self.batch_size ):
lowercase__ = {}
lowercase__ = torch.randint(
high=self.num_labels, size=(self.n_targets,), device=_UpperCAmelCase )
lowercase__ = torch.rand(self.n_targets, 4, device=_UpperCAmelCase )
labels.append(_UpperCAmelCase )
lowercase__ = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self ):
'''simple docstring'''
return YolosConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=_UpperCAmelCase, initializer_range=self.initializer_range, num_detection_tokens=self.num_detection_tokens, num_labels=self.num_labels, )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = YolosModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = YolosForObjectDetection(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(pixel_values=_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4) )
lowercase__ = model(pixel_values=_UpperCAmelCase, labels=_UpperCAmelCase )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4) )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class a__ ( _a , _a , unittest.TestCase ):
snake_case_ = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
snake_case_ = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=False ):
'''simple docstring'''
lowercase__ = super()._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowercase__ = []
for i in range(self.model_tester.batch_size ):
lowercase__ = {}
lowercase__ = torch.ones(
size=(self.model_tester.n_targets,), device=_UpperCAmelCase, dtype=torch.long )
lowercase__ = torch.ones(
self.model_tester.n_targets, 4, device=_UpperCAmelCase, dtype=torch.float )
labels.append(_UpperCAmelCase )
lowercase__ = labels
return inputs_dict
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = YolosModelTester(self )
lowercase__ = ConfigTester(self, config_class=_UpperCAmelCase, has_text_modality=_UpperCAmelCase, hidden_size=37 )
def snake_case__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase, nn.Linear ) )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(_UpperCAmelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1], _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = True
# in YOLOS, the seq_len is different
lowercase__ = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowercase__ = True
lowercase__ = False
lowercase__ = True
lowercase__ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) )
lowercase__ = outputs.attentions
self.assertEqual(len(_UpperCAmelCase ), self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ = True
lowercase__ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) )
lowercase__ = outputs.attentions
self.assertEqual(len(_UpperCAmelCase ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
lowercase__ = len(_UpperCAmelCase )
# Check attention is always last and order is fine
lowercase__ = True
lowercase__ = True
lowercase__ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) )
lowercase__ = 1
self.assertEqual(out_len + added_hidden_states, len(_UpperCAmelCase ) )
lowercase__ = outputs.attentions
self.assertEqual(len(_UpperCAmelCase ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
def snake_case__ ( self ):
'''simple docstring'''
def check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
lowercase__ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) )
lowercase__ = outputs.hidden_states
lowercase__ = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_UpperCAmelCase ), _UpperCAmelCase )
# YOLOS has a different seq_length
lowercase__ = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [seq_length, self.model_tester.hidden_size], )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = True
check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*_UpperCAmelCase )
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = YolosModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def __a ( ):
'''simple docstring'''
lowercase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
@cached_property
def snake_case__ ( self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained("hustvl/yolos-small" ) if is_vision_available() else None
@slow
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = YolosForObjectDetection.from_pretrained("hustvl/yolos-small" ).to(_UpperCAmelCase )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=_UpperCAmelCase, return_tensors="pt" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
lowercase__ = model(inputs.pixel_values )
# verify outputs
lowercase__ = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape, _UpperCAmelCase )
lowercase__ = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]], device=_UpperCAmelCase, )
lowercase__ = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]], device=_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], _UpperCAmelCase, atol=1E-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], _UpperCAmelCase, atol=1E-4 ) )
# verify postprocessing
lowercase__ = image_processor.post_process_object_detection(
_UpperCAmelCase, threshold=0.3, target_sizes=[image.size[::-1]] )[0]
lowercase__ = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(_UpperCAmelCase )
lowercase__ = [75, 75, 17, 63, 17]
lowercase__ = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(_UpperCAmelCase )
self.assertEqual(len(results["scores"] ), 5 )
self.assertTrue(torch.allclose(results["scores"], _UpperCAmelCase, atol=1E-4 ) )
self.assertSequenceEqual(results["labels"].tolist(), _UpperCAmelCase )
self.assertTrue(torch.allclose(results["boxes"][0, :], _UpperCAmelCase ) )
| 668 |
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class a__ ( _a , unittest.TestCase ):
snake_case_ = PriorTransformer
snake_case_ = "hidden_states"
@property
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = 4
lowercase__ = 8
lowercase__ = 7
lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def snake_case__ ( self, _UpperCAmelCase=0 ):
'''simple docstring'''
torch.manual_seed(_UpperCAmelCase )
lowercase__ = 4
lowercase__ = 8
lowercase__ = 7
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def snake_case__ ( self ):
'''simple docstring'''
return (4, 8)
@property
def snake_case__ ( self ):
'''simple docstring'''
return (4, 8)
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = {
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_layers": 2,
"embedding_dim": 8,
"num_embeddings": 7,
"additional_embeddings": 4,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = PriorTransformer.from_pretrained(
"hf-internal-testing/prior-dummy", output_loading_info=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertEqual(len(loading_info["missing_keys"] ), 0 )
model.to(_UpperCAmelCase )
lowercase__ = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.prepare_init_args_and_inputs_for_common()
lowercase__ = self.model_class(**_UpperCAmelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["hidden_states", "timestep"]
self.assertListEqual(arg_names[:2], _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" )
lowercase__ = model.to(_UpperCAmelCase )
if hasattr(_UpperCAmelCase, "set_default_attn_processor" ):
model.set_default_attn_processor()
lowercase__ = self.get_dummy_seed_input()
with torch.no_grad():
lowercase__ = model(**_UpperCAmelCase )[0]
lowercase__ = output[0, :5].flatten().cpu()
print(_UpperCAmelCase )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
lowercase__ = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] )
self.assertTrue(torch_all_close(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-2 ) )
@slow
class a__ ( unittest.TestCase ):
def snake_case__ ( self, _UpperCAmelCase=1, _UpperCAmelCase=768, _UpperCAmelCase=77, _UpperCAmelCase=0 ):
'''simple docstring'''
torch.manual_seed(_UpperCAmelCase )
lowercase__ = batch_size
lowercase__ = embedding_dim
lowercase__ = num_embeddings
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def snake_case__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]],
[37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]],
# fmt: on
] )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior" )
model.to(_UpperCAmelCase )
lowercase__ = self.get_dummy_seed_input(seed=_UpperCAmelCase )
with torch.no_grad():
lowercase__ = model(**_UpperCAmelCase )[0]
assert list(sample.shape ) == [1, 768]
lowercase__ = sample[0, :8].flatten().cpu()
print(_UpperCAmelCase )
lowercase__ = torch.tensor(_UpperCAmelCase )
assert torch_all_close(_UpperCAmelCase, _UpperCAmelCase, atol=1E-3 )
| 668 | 1 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class a__ :
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
return None
class a__ :
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
return None
class a__ ( unittest.TestCase ):
snake_case_ = [
# (model_name, model_kwargs)
("bert-base-cased", {}),
("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(_UpperCAmelCase, "tf", 12, **_UpperCAmelCase )
@require_torch
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(_UpperCAmelCase, "pt", 12, **_UpperCAmelCase )
@require_torch
@slow
def snake_case__ ( self ):
'''simple docstring'''
from transformers import BertModel
lowercase__ = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
with NamedTemporaryFile(mode="w+t" ) as vocab_file:
vocab_file.write("\n".join(_UpperCAmelCase ) )
vocab_file.flush()
lowercase__ = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowercase__ = BertModel(BertConfig(vocab_size=len(_UpperCAmelCase ) ) )
model.save_pretrained(_UpperCAmelCase )
self._test_export(_UpperCAmelCase, "pt", 12, _UpperCAmelCase )
@require_tf
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase__ = self._test_export(_UpperCAmelCase, "tf", 12, **_UpperCAmelCase )
lowercase__ = quantize(Path(_UpperCAmelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(_UpperCAmelCase ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
@require_torch
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase__ = self._test_export(_UpperCAmelCase, "pt", 12, **_UpperCAmelCase )
lowercase__ = quantize(_UpperCAmelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(_UpperCAmelCase ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=None, **_UpperCAmelCase ):
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowercase__ = Path(_UpperCAmelCase ).joinpath("model.onnx" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase )
return path
except Exception as e:
self.fail(_UpperCAmelCase )
@require_torch
@require_tokenizers
@slow
def snake_case__ ( self ):
'''simple docstring'''
from transformers import BertModel
lowercase__ = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
lowercase__ = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(_UpperCAmelCase, _UpperCAmelCase, "pt" )
@require_tf
@require_tokenizers
@slow
def snake_case__ ( self ):
'''simple docstring'''
from transformers import TFBertModel
lowercase__ = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
lowercase__ = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(_UpperCAmelCase, _UpperCAmelCase, "tf" )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = FeatureExtractionPipeline(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
lowercase__ , lowercase__ , lowercase__ , lowercase__ = infer_shapes(_UpperCAmelCase, _UpperCAmelCase )
# Assert all variables are present
self.assertEqual(len(_UpperCAmelCase ), len(_UpperCAmelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3], _UpperCAmelCase )
self.assertSequenceEqual(variable_names[3:], _UpperCAmelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"} )
self.assertDictEqual(shapes["output_1"], {0: "batch"} )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = ["input_ids", "attention_mask", "token_type_ids"]
lowercase__ = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
lowercase__ , lowercase__ = ensure_valid_input(FuncContiguousArgs(), _UpperCAmelCase, _UpperCAmelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(_UpperCAmelCase ), 3 )
# Should have exactly the same input names
self.assertEqual(set(_UpperCAmelCase ), set(_UpperCAmelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(_UpperCAmelCase, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowercase__ , lowercase__ = ensure_valid_input(FuncNonContiguousArgs(), _UpperCAmelCase, _UpperCAmelCase )
        # Should have exactly one arg (everything before the unprovided "some_other_args")
self.assertEqual(len(_UpperCAmelCase ), 1 )
self.assertEqual(len(_UpperCAmelCase ), 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0], tokens["input_ids"] )
self.assertEqual(ordered_input_names[0], "input_ids" )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ), "-test" )
self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix() )
| 668 |
"""simple docstring"""
lowerCAmelCase_: Any = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def __a ( A ):
'''simple docstring'''
if not isinstance(A , A ):
lowercase__ = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(A )
lowercase__ = "".join(bin(A )[2:].zfill(8 ) for byte in data )
lowercase__ = len(A ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ = b"=" * ((6 - len(A ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(A ) % 6)
else:
lowercase__ = b""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(A ) , 6 ) ).encode()
+ padding
)
def __a ( A ):
'''simple docstring'''
if not isinstance(A , A ) and not isinstance(A , A ):
lowercase__ = (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(A )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(A , A ):
try:
lowercase__ = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ = encoded_data[:-padding]
lowercase__ = "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ = "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(A ) , 8 )
]
return bytes(A )
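# Editor's sketch: a round-trip sanity check against the standard library. The
# names base64_encode / base64_decode are assumptions -- they are how the rest
# of this file would refer to the two helpers above, whose defs were renamed.
def _roundtrip_sketch():
    import base64  # stdlib reference implementation
    payload = b"Hello, base64!"
    encoded = base64_encode(payload )
    assert encoded == base64.b64encode(payload )
    assert base64_decode(encoded ) == payload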
if __name__ == "__main__":
import doctest
doctest.testmod()
| 668 | 1 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCAmelCase_: Union[str, Any] = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
lowerCAmelCase_: Any = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
lowerCAmelCase_: Optional[int] = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
lowerCAmelCase_: List[Any] = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
def snake_case__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Value("string", id="sequence" ),
"references": datasets.Value("string", id="sequence" ),
} ), codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"], reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
], )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
import nltk
nltk.download("wordnet" )
if NLTK_VERSION >= version.Version("3.6.5" ):
nltk.download("punkt" )
if NLTK_VERSION >= version.Version("3.6.6" ):
nltk.download("omw-1.4" )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=0.9, _UpperCAmelCase=3, _UpperCAmelCase=0.5 ):
'''simple docstring'''
if NLTK_VERSION >= version.Version("3.6.5" ):
lowercase__ = [
meteor_score.single_meteor_score(
word_tokenize(_UpperCAmelCase ), word_tokenize(_UpperCAmelCase ), alpha=_UpperCAmelCase, beta=_UpperCAmelCase, gamma=_UpperCAmelCase )
for ref, pred in zip(_UpperCAmelCase, _UpperCAmelCase )
]
else:
lowercase__ = [
meteor_score.single_meteor_score(_UpperCAmelCase, _UpperCAmelCase, alpha=_UpperCAmelCase, beta=_UpperCAmelCase, gamma=_UpperCAmelCase )
for ref, pred in zip(_UpperCAmelCase, _UpperCAmelCase )
]
return {"meteor": np.mean(_UpperCAmelCase )}
| 668 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __a ( A , A , A = "x" , A = 10**-10 , A = 1 , ):
'''simple docstring'''
lowercase__ = symbols(A )
lowercase__ = lambdify(A , A )
lowercase__ = lambdify(A , diff(A , A ) )
lowercase__ = starting_point
while True:
if diff_function(A ) != 0:
lowercase__ = prev_guess - multiplicity * func(A ) / diff_function(
A )
else:
raise ZeroDivisionError("Could not find root" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
lowercase__ = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(F'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}')
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
F'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
F'{newton_raphson("exp(x) - 1", 1_0, precision=0.005)}',
)
# Find root of cos(x)
print(F'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
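    # Numeric sketch of the same Newton update without sympy (illustrative;
    # plain callables stand in for the symbolic function and its derivative):
    def _newton_numeric(f, df, x0, tol=1e-10):
        x = x0
        while True:
            d = df(x)
            if d == 0:
                raise ZeroDivisionError("Could not find root")
            nxt = x - f(x) / d
            if abs(nxt - x) < tol:
                return nxt
            x = nxt

    _root = _newton_numeric(lambda v: v * v - 2, lambda v: 2 * v, 1.0)
    print(f"sqrt(2) via plain Newton-Raphson: {_root}")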
| 668 | 1 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_: List[Any] = logging.get_logger(__name__)
lowerCAmelCase_: Optional[Any] = {"vocab_file": "vocab.json"}
lowerCAmelCase_: Any = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
lowerCAmelCase_: List[Any] = {"mgp-str": 2_7}
class a__ ( _a ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self, _UpperCAmelCase, _UpperCAmelCase="[GO]", _UpperCAmelCase="[GO]", _UpperCAmelCase="[s]", _UpperCAmelCase="[GO]", **_UpperCAmelCase ):
'''simple docstring'''
super().__init__(
unk_token=_UpperCAmelCase, bos_token=_UpperCAmelCase, eos_token=_UpperCAmelCase, pad_token=_UpperCAmelCase, **_UpperCAmelCase, )
with open(_UpperCAmelCase, encoding="utf-8" ) as vocab_handle:
lowercase__ = json.load(_UpperCAmelCase )
lowercase__ = {v: k for k, v in self.vocab.items()}
@property
def snake_case__ ( self ):
'''simple docstring'''
return len(self.vocab )
def snake_case__ ( self ):
'''simple docstring'''
return dict(self.vocab, **self.added_tokens_encoder )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for s in text:
char_tokens.extend(_UpperCAmelCase )
return char_tokens
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
return self.vocab.get(_UpperCAmelCase, self.vocab.get(self.unk_token ) )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
return self.decoder.get(_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = None ):
'''simple docstring'''
if not os.path.isdir(_UpperCAmelCase ):
logger.error("Vocabulary path ({}) should be a directory".format(_UpperCAmelCase ) )
return
lowercase__ = os.path.join(
_UpperCAmelCase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
with open(_UpperCAmelCase, "w", encoding="utf-8" ) as f:
f.write(json.dumps(self.vocab, indent=2, sort_keys=_UpperCAmelCase, ensure_ascii=_UpperCAmelCase ) + "\n" )
return (vocab_file,)
| 668 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_: Union[str, Any] = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Union[str, Any] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Any = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Tuple = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Optional[Any] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase_: Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
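    # Illustrative effect (an assumption about usage, not part of this file):
    # `from transformers.models.distilbert import DistilBertModel` now resolves
    # through _LazyModule, importing modeling_distilbert on first attribute
    # access instead of at package import time.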
| 668 | 1 |
"""simple docstring"""
import numpy as np
import datasets
lowerCAmelCase_: Any = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
lowerCAmelCase_: Optional[int] = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
lowerCAmelCase_: List[str] = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
def snake_case__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float", id="sequence" ), id="X" ),
} ), )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = np.array(_UpperCAmelCase )
lowercase__ = np.array(_UpperCAmelCase )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("Expected `X` to be a 2D vector" )
if len(reference_distribution.shape ) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
# Get mahalanobis distance for each prediction
lowercase__ = X - np.mean(_UpperCAmelCase )
lowercase__ = np.cov(reference_distribution.T )
try:
lowercase__ = np.linalg.inv(_UpperCAmelCase )
except np.linalg.LinAlgError:
lowercase__ = np.linalg.pinv(_UpperCAmelCase )
lowercase__ = np.dot(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = np.dot(_UpperCAmelCase, X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 668 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase_: Union[str, Any] = logging.get_logger(__name__)
class a__ ( _a ):
snake_case_ = ["audio_values", "audio_mask"]
def __init__( self, _UpperCAmelCase=2048, _UpperCAmelCase=1, _UpperCAmelCase=[16, 16], _UpperCAmelCase=128, _UpperCAmelCase=4_4100, _UpperCAmelCase=86, _UpperCAmelCase=2048, _UpperCAmelCase=0.0, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
feature_size=_UpperCAmelCase, sampling_rate=_UpperCAmelCase, padding_value=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = spectrogram_length
lowercase__ = num_channels
lowercase__ = patch_size
lowercase__ = feature_size // self.patch_size[1]
lowercase__ = n_fft
lowercase__ = sampling_rate // hop_length_to_sampling_rate
lowercase__ = sampling_rate
lowercase__ = padding_value
lowercase__ = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2, num_mel_filters=_UpperCAmelCase, min_frequency=0.0, max_frequency=22_050.0, sampling_rate=_UpperCAmelCase, norm="slaney", mel_scale="slaney", ).T
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = spectrogram(
_UpperCAmelCase, window_function(self.n_fft, "hann" ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0, )
lowercase__ = log_spec[:, :-1]
lowercase__ = log_spec - 20.0
lowercase__ = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0
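        # Illustrative range bookkeeping (assuming spectrogram() emits dB values
        # in [-80, 0] given db_range=80.0): subtracting 20 maps to [-100, -20],
        # dividing by 40 to [-2.5, -0.5], and the clip plus 1 leaves [-1.0, 0.5].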
return log_spec
def __call__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = True, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, **_UpperCAmelCase, ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
F''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowercase__ = isinstance(_UpperCAmelCase, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
lowercase__ = is_batched_numpy or (
isinstance(_UpperCAmelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_UpperCAmelCase, np.ndarray ):
lowercase__ = np.asarray(_UpperCAmelCase, dtype=np.floataa )
elif isinstance(_UpperCAmelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowercase__ = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0], _UpperCAmelCase ):
lowercase__ = [np.asarray(_UpperCAmelCase, dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowercase__ = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowercase__ = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowercase__ = np.array(_UpperCAmelCase ).astype(np.floataa )
# convert into correct format for padding
lowercase__ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowercase__ = np.ones([len(_UpperCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
lowercase__ = padded_audio_features * self.padding_value
for i in range(len(_UpperCAmelCase ) ):
lowercase__ = audio_features[i]
lowercase__ = feature
# return as BatchFeature
if return_attention_mask:
lowercase__ = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
lowercase__ = {"audio_values": padded_audio_features}
lowercase__ = BatchFeature(data=_UpperCAmelCase, tensor_type=_UpperCAmelCase )
return encoded_inputs
| 668 | 1 |
"""simple docstring"""
def __a ( A = 10 , A = 10_00 , A = True ):
'''simple docstring'''
assert (
isinstance(A , A )
and isinstance(A , A )
and isinstance(A , A )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("Invalid value for min_val or max_val (min_value < max_value)" )
return min_val if option else max_val
def __a ( A , A ):
'''simple docstring'''
return int((number_a + number_a) / 2 )
def __a ( A , A , A ):
'''simple docstring'''
assert (
isinstance(A , A ) and isinstance(A , A ) and isinstance(A , A )
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError("argument value for lower and higher must be(lower > higher)" )
if not lower < to_guess < higher:
raise ValueError(
"guess value must be within the range of lower and higher value" )
    def answer(number ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("started..." )
lowercase__ = lower
lowercase__ = higher
lowercase__ = []
while True:
lowercase__ = get_avg(A , A )
last_numbers.append(A )
if answer(A ) == "low":
lowercase__ = number
elif answer(A ) == "high":
lowercase__ = number
else:
break
print(f'''guess the number : {last_numbers[-1]}''' )
print(f'''details : {last_numbers!s}''' )
def __a ( ):
'''simple docstring'''
lowercase__ = int(input("Enter lower value : " ).strip() )
lowercase__ = int(input("Enter high value : " ).strip() )
lowercase__ = int(input("Enter value to guess : " ).strip() )
guess_the_number(A , A , A )
if __name__ == "__main__":
main()
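# Pure-function sketch of the same bisection idea (illustrative; uses the
# standard +/-1 bound updates, so it terminates even at the range edges):
def _bisect_guess(lower: int, higher: int, target: int) -> int:
    while lower <= higher:
        mid = (lower + higher) // 2
        if mid < target:
            lower = mid + 1
        elif mid > target:
            higher = mid - 1
        else:
            return mid
    raise ValueError("target outside [lower, higher]")


assert _bisect_guess(1, 1000, 17) == 17  # cheap import-time self-check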
| 668 |
"""simple docstring"""
from __future__ import annotations
import math
def __a ( A ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
lowerCAmelCase_: Optional[Any] = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]
def __a ( A ):
'''simple docstring'''
    if not isinstance(A , int ):
raise ValueError("n must be an integer" )
if n <= 0:
raise ValueError("n must be >= 0" )
lowercase__ = []
for num in range(len(A ) ):
lowercase__ = 0
while 2 * i * i <= odd_composites[num]:
lowercase__ = odd_composites[num] - 2 * i * i
if is_prime(A ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(A ) == n:
return list_nums
return []
def __a ( ):
'''simple docstring'''
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'{solution() = }')
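    # Illustrative decompositions of the form prime + 2*i**2 that the search
    # above rules out one by one: 9 = 7 + 2*1**2, 15 = 13 + 2*1**2,
    # 33 = 31 + 2*1**2; the answer is the first odd composite with no such form.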
| 668 | 1 |
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def __a ( A ):
'''simple docstring'''
lowercase__ = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
f'''{test_file} instead.''' )
lowercase__ = components[-1]
if not test_fn.endswith("py" ):
raise ValueError(f'''`test_file` should be a python file. Got {test_fn} instead.''' )
if not test_fn.startswith("test_modeling_" ):
raise ValueError(
f'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' )
lowercase__ = components[:-1] + [test_fn.replace(".py" , "" )]
lowercase__ = ".".join(A )
return test_module_path
def __a ( A ):
'''simple docstring'''
lowercase__ = get_module_path(A )
lowercase__ = importlib.import_module(A )
return test_module
def __a ( A ):
'''simple docstring'''
lowercase__ = []
lowercase__ = get_test_module(A )
for attr in dir(A ):
if attr.endswith("ModelTester" ):
tester_classes.append(getattr(A , A ) )
# sort with class names
    return sorted(A , key=lambda x : x.__name__ )
def __a ( A ):
'''simple docstring'''
lowercase__ = []
lowercase__ = get_test_module(A )
for attr in dir(A ):
lowercase__ = getattr(A , A )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
lowercase__ = getattr(A , "all_model_classes" , [] )
if len(A ) > 0:
test_classes.append(A )
# sort with class names
    return sorted(A , key=lambda x : x.__name__ )
def __a ( A ):
'''simple docstring'''
lowercase__ = get_test_classes(A )
lowercase__ = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
    return sorted(A , key=lambda x : x.__name__ )
def __a ( A ):
'''simple docstring'''
lowercase__ = test_class()
if hasattr(A , "setUp" ):
test.setUp()
lowercase__ = None
if hasattr(A , "model_tester" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
lowercase__ = test.model_tester.__class__
return model_tester
def __a ( A , A ):
'''simple docstring'''
lowercase__ = get_test_classes(A )
lowercase__ = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(A )
# sort with class names
    return sorted(A , key=lambda x : x.__name__ )
def __a ( A , A ):
'''simple docstring'''
lowercase__ = get_test_classes_for_model(A , A )
lowercase__ = []
for test_class in test_classes:
lowercase__ = get_model_tester_from_test_class(A )
if tester_class is not None:
tester_classes.append(A )
# sort with class names
    return sorted(A , key=lambda x : x.__name__ )
def __a ( A ):
'''simple docstring'''
lowercase__ = get_test_classes(A )
lowercase__ = {test_class: get_model_tester_from_test_class(A ) for test_class in test_classes}
return test_tester_mapping
def __a ( A ):
'''simple docstring'''
lowercase__ = get_model_classes(A )
lowercase__ = {
model_class: get_test_classes_for_model(A , A ) for model_class in model_classes
}
return model_test_mapping
def __a ( A ):
'''simple docstring'''
lowercase__ = get_model_classes(A )
lowercase__ = {
model_class: get_tester_classes_for_model(A , A ) for model_class in model_classes
}
return model_to_tester_mapping
def __a ( A ):
'''simple docstring'''
if isinstance(A , A ):
return o
elif isinstance(A , A ):
return o.__name__
elif isinstance(A , (list, tuple) ):
return [to_json(A ) for x in o]
elif isinstance(A , A ):
return {to_json(A ): to_json(A ) for k, v in o.items()}
else:
return o
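# Quick standalone check of the path-to-module mapping implemented above
# (illustrative; restates the transformation rather than calling the helper):
if __name__ == "__main__":
    _parts = "tests/models/bert/test_modeling_bert.py".split("/")
    assert ".".join(_parts[:-1] + [_parts[-1].replace(".py", "")]) == (
        "tests.models.bert.test_modeling_bert"
    )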
| 668 |
"""simple docstring"""
import os
import sys
lowerCAmelCase_: Any = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowerCAmelCase_: Union[str, Any] = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoConfig.from_pretrained(*A , **A )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(*A , **A )
@add_start_docstrings(AutoModel.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModel.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*A , **A )
| 668 | 1 |
"""simple docstring"""
import random
def __a ( A , A ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ = [], [], []
for element in data:
if element < pivot:
less.append(A )
elif element > pivot:
greater.append(A )
else:
equal.append(A )
return less, equal, greater
def __a ( A , A ):
'''simple docstring'''
if index >= len(A ) or index < 0:
return None
lowercase__ = items[random.randint(0 , len(A ) - 1 )]
lowercase__ = 0
lowercase__ , lowercase__ , lowercase__ = _partition(A , A )
lowercase__ = len(A )
lowercase__ = len(A )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(A , A )
# must be in larger
else:
return quick_select(A , index - (m + count) )
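# Self-contained sanity sketch of the selection property (illustrative; uses
# explicit names and checks every rank against a full sort):
def _kth_smallest(data, k):
    pivot = data[random.randint(0, len(data) - 1)]
    less = [x for x in data if x < pivot]
    equal = [x for x in data if x == pivot]
    if k < len(less):
        return _kth_smallest(less, k)
    if k < len(less) + len(equal):
        return pivot
    return _kth_smallest([x for x in data if x > pivot], k - len(less) - len(equal))


if __name__ == "__main__":
    _nums = [7, 2, 9, 4, 4, 1]
    assert [_kth_smallest(_nums, i) for i in range(len(_nums))] == sorted(_nums)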
| 668 |
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class a__ ( unittest.TestCase ):
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(_UpperCAmelCase ):
lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(_UpperCAmelCase ):
lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase )
lowercase__ = FlaxBertModel.from_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX )
@jax.jit
def eval(**_UpperCAmelCase ):
return model(**_UpperCAmelCase )
eval(**_UpperCAmelCase ).block_until_ready()
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase )
lowercase__ = FlaxRobertaModel.from_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX )
@jax.jit
def eval(**_UpperCAmelCase ):
return model(**_UpperCAmelCase )
eval(**_UpperCAmelCase ).block_until_ready()
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase, "bert-base is not a local folder and is not a valid model identifier" ):
lowercase__ = FlaxAutoModel.from_pretrained("bert-base" )
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase, revision="aaaaaa" )
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase, "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack", ):
lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase, "Use `from_pt=True` to load this model" ):
lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
| 668 | 1 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __a ( A , A , A , A , A ):
'''simple docstring'''
with open(A ) as metadata_file:
lowercase__ = json.load(A )
lowercase__ = LukeConfig(use_entity_aware_attention=A , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
lowercase__ = torch.load(A , map_location="cpu" )
# Load the entity vocab file
lowercase__ = load_entity_vocab(A )
lowercase__ = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
lowercase__ = AddedToken("<ent>" , lstrip=A , rstrip=A )
lowercase__ = AddedToken("<ent2>" , lstrip=A , rstrip=A )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(A )
with open(os.path.join(A , LukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(A , A )
lowercase__ = LukeTokenizer.from_pretrained(A )
# Initialize the embeddings of the special tokens
lowercase__ = state_dict["embeddings.word_embeddings.weight"]
lowercase__ = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 )
lowercase__ = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 )
lowercase__ = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
lowercase__ = f'''encoder.layer.{layer_index}.attention.self.'''
lowercase__ = state_dict[prefix + matrix_name]
lowercase__ = state_dict[prefix + matrix_name]
lowercase__ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
lowercase__ = state_dict["entity_embeddings.entity_embeddings.weight"]
lowercase__ = entity_emb[entity_vocab["[MASK]"]]
lowercase__ = LukeModel(config=A ).eval()
lowercase__ , lowercase__ = model.load_state_dict(A , strict=A )
if not (len(A ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f'''Missing keys {', '.join(A )}. Expected only missing embeddings.position_ids''' )
if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )):
raise ValueError(
"Unexpected keys"
f''' {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}''' )
# Check outputs
lowercase__ = LukeTokenizer.from_pretrained(A , task="entity_classification" )
lowercase__ = (
"Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
" new world number one avoid a humiliating second- round exit at Wimbledon ."
)
lowercase__ = (39, 42)
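    # Note (illustrative): text[39:42] slices the pronoun "she" in the probe
    # sentence, i.e. the mention the entity-classification head scores.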
lowercase__ = tokenizer(A , entity_spans=[span] , add_prefix_space=A , return_tensors="pt" )
lowercase__ = model(**A )
# Verify word hidden states
if model_size == "large":
lowercase__ = torch.Size((1, 42, 10_24) )
lowercase__ = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
lowercase__ = torch.Size((1, 42, 7_68) )
lowercase__ = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , A , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
lowercase__ = torch.Size((1, 1, 10_24) )
lowercase__ = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
lowercase__ = torch.Size((1, 1, 7_68) )
lowercase__ = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
f''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , A , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(A ) )
model.save_pretrained(A )
def __a ( A ):
'''simple docstring'''
lowercase__ = {}
with open(A , "r" , encoding="utf-8" ) as f:
        for index, line in enumerate(f ):
lowercase__ , lowercase__ = line.rstrip().split("\t" )
lowercase__ = index
return entity_vocab
if __name__ == "__main__":
lowerCAmelCase_: List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
lowerCAmelCase_: str = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 668 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_: str = logging.get_logger(__name__)
lowerCAmelCase_: List[Any] = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class a__ ( _a ):
snake_case_ = "data2vec-vision"
def __init__( self, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.0, _UpperCAmelCase=0.0, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=224, _UpperCAmelCase=16, _UpperCAmelCase=3, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=True, _UpperCAmelCase=[3, 5, 7, 11], _UpperCAmelCase=[1, 2, 3, 6], _UpperCAmelCase=True, _UpperCAmelCase=0.4, _UpperCAmelCase=256, _UpperCAmelCase=1, _UpperCAmelCase=False, _UpperCAmelCase=255, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = use_mask_token
lowercase__ = use_absolute_position_embeddings
lowercase__ = use_relative_position_bias
lowercase__ = use_shared_relative_position_bias
lowercase__ = layer_scale_init_value
lowercase__ = drop_path_rate
lowercase__ = use_mean_pooling
# decode head attributes (semantic segmentation)
lowercase__ = out_indices
lowercase__ = pool_scales
# auxiliary head attributes (semantic segmentation)
lowercase__ = use_auxiliary_head
lowercase__ = auxiliary_loss_weight
lowercase__ = auxiliary_channels
lowercase__ = auxiliary_num_convs
lowercase__ = auxiliary_concat_input
lowercase__ = semantic_loss_ignore_index
class a__ ( _a ):
snake_case_ = version.parse("1.11" )
@property
def snake_case__ ( self ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def snake_case__ ( self ):
'''simple docstring'''
return 1E-4
| 668 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( _a , unittest.TestCase ):
snake_case_ = LEDTokenizer
snake_case_ = LEDTokenizerFast
snake_case_ = True
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
lowercase__ = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowercase__ = dict(zip(_UpperCAmelCase, range(len(_UpperCAmelCase ) ) ) )
lowercase__ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase__ = {"unk_token": "<unk>"}
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + "\n" )
with open(self.merges_file, "w", encoding="utf-8" ) as fp:
fp.write("\n".join(_UpperCAmelCase ) )
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def snake_case__ ( self ):
'''simple docstring'''
return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
@cached_property
def snake_case__ ( self ):
'''simple docstring'''
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowercase__ = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ = tokenizer(_UpperCAmelCase, max_length=len(_UpperCAmelCase ), padding=_UpperCAmelCase, return_tensors="pt" )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
self.assertEqual((2, 9), batch.input_ids.shape )
self.assertEqual((2, 9), batch.attention_mask.shape )
lowercase__ = batch.input_ids.tolist()[0]
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors="pt" )
self.assertIn("input_ids", _UpperCAmelCase )
self.assertIn("attention_mask", _UpperCAmelCase )
self.assertNotIn("labels", _UpperCAmelCase )
self.assertNotIn("decoder_attention_mask", _UpperCAmelCase )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ = tokenizer(text_target=_UpperCAmelCase, max_length=32, padding="max_length", return_tensors="pt" )
self.assertEqual(32, targets["input_ids"].shape[1] )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ = tokenizer(
["I am a small frog" * 1024, "I am a small frog"], padding=_UpperCAmelCase, truncation=_UpperCAmelCase, return_tensors="pt" )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
self.assertEqual(batch.input_ids.shape, (2, 5122) )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = ["A long paragraph for summarization."]
lowercase__ = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ = tokenizer(_UpperCAmelCase, return_tensors="pt" )
lowercase__ = tokenizer(text_target=_UpperCAmelCase, return_tensors="pt" )
lowercase__ = inputs["input_ids"]
lowercase__ = targets["input_ids"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ = ["Summary of the text.", "Another summary."]
lowercase__ = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase )
lowercase__ = [[0] * len(_UpperCAmelCase ) for x in encoded_output["input_ids"]]
lowercase__ = tokenizer.pad(_UpperCAmelCase )
self.assertSequenceEqual(outputs["global_attention_mask"], _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase__ = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase )
lowercase__ = self.tokenizer_class.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase )
lowercase__ = "A, <mask> AllenNLP sentence."
lowercase__ = tokenizer_r.encode_plus(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase, return_token_type_ids=_UpperCAmelCase )
lowercase__ = tokenizer_p.encode_plus(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase, return_token_type_ids=_UpperCAmelCase )
self.assertEqual(sum(tokens_r["token_type_ids"] ), sum(tokens_p["token_type_ids"] ) )
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ), sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ), )
lowercase__ = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
lowercase__ = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
_UpperCAmelCase, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
_UpperCAmelCase, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 668 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: List[Any] = logging.get_logger(__name__)
lowerCAmelCase_: int = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class a__ ( _a ):
snake_case_ = "markuplm"
def __init__( self, _UpperCAmelCase=3_0522, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=2, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=0, _UpperCAmelCase=0, _UpperCAmelCase=2, _UpperCAmelCase=256, _UpperCAmelCase=1024, _UpperCAmelCase=216, _UpperCAmelCase=1001, _UpperCAmelCase=32, _UpperCAmelCase=50, _UpperCAmelCase="absolute", _UpperCAmelCase=True, _UpperCAmelCase=None, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = use_cache
lowercase__ = classifier_dropout
# additional properties
lowercase__ = max_depth
lowercase__ = max_xpath_tag_unit_embeddings
lowercase__ = max_xpath_subs_unit_embeddings
lowercase__ = tag_pad_id
lowercase__ = subs_pad_id
lowercase__ = xpath_unit_hidden_size
| 668 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a__ ( unittest.TestCase ):
@property
def snake_case__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = UNetaDModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), )
return model
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.dummy_uncond_unet
lowercase__ = ScoreSdeVeScheduler()
lowercase__ = ScoreSdeVePipeline(unet=_UpperCAmelCase, scheduler=_UpperCAmelCase )
sde_ve.to(_UpperCAmelCase )
sde_ve.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = torch.manual_seed(0 )
lowercase__ = sde_ve(num_inference_steps=2, output_type="numpy", generator=_UpperCAmelCase ).images
lowercase__ = torch.manual_seed(0 )
lowercase__ = sde_ve(num_inference_steps=2, output_type="numpy", generator=_UpperCAmelCase, return_dict=_UpperCAmelCase )[
0
]
lowercase__ = image[0, -3:, -3:, -1]
lowercase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase__ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class a__ ( unittest.TestCase ):
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "google/ncsnpp-church-256"
lowercase__ = UNetaDModel.from_pretrained(_UpperCAmelCase )
lowercase__ = ScoreSdeVeScheduler.from_pretrained(_UpperCAmelCase )
lowercase__ = ScoreSdeVePipeline(unet=_UpperCAmelCase, scheduler=_UpperCAmelCase )
sde_ve.to(_UpperCAmelCase )
sde_ve.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = torch.manual_seed(0 )
lowercase__ = sde_ve(num_inference_steps=10, output_type="numpy", generator=_UpperCAmelCase ).images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowercase__ = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 668 |
"""simple docstring"""
lowerCAmelCase_: Union[str, Any] = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
lowerCAmelCase_: List[str] = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
lowerCAmelCase_: List[str] = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
lowerCAmelCase_: Dict = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
lowerCAmelCase_: Optional[int] = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
lowerCAmelCase_: Tuple = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
lowerCAmelCase_: str = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
lowerCAmelCase_: int = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 668 | 1 |
"""simple docstring"""
lowerCAmelCase_: Union[str, Any] = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
lowerCAmelCase_: List[str] = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
lowerCAmelCase_: List[str] = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
lowerCAmelCase_: Dict = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
lowerCAmelCase_: Optional[int] = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
lowerCAmelCase_: Tuple = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
lowerCAmelCase_: str = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
lowerCAmelCase_: int = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
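# The lists above appear to be hard-coded, strictly descending diffusion timestep
# schedules (999 -> 0) at several granularities. A minimal, hypothetical usage
# sketch (the scheduler choice and custom-timesteps keyword are assumptions, not
# shown in this snippet):
#
#   from diffusers import DDPMScheduler
#   scheduler = DDPMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(timesteps=lowerCAmelCase_)  # custom schedule, if the installed version supports it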
| 668 |
"""simple docstring"""
from __future__ import annotations
def __a ( A , A ):
'''simple docstring'''
if partitions <= 0:
raise ValueError("partitions must be a positive number!" )
if partitions > number_of_bytes:
raise ValueError("partitions can not > number_of_bytes!" )
lowercase__ = number_of_bytes // partitions
lowercase__ = []
for i in range(A ):
lowercase__ = i * bytes_per_partition + 1
lowercase__ = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
return allocation_list
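# A small usage sketch (argument order (number_of_bytes, partitions) is assumed
# from the checks above): __a(100, 4) -> ['1-25', '26-50', '51-75', '76-100']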
if __name__ == "__main__":
import doctest
doctest.testmod()
| 668 | 1 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_: List[str] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class a__ ( _a , unittest.TestCase ):
snake_case_ = XGLMTokenizer
snake_case_ = XGLMTokenizerFast
snake_case_ = True
snake_case_ = True
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = XGLMTokenizer(_UpperCAmelCase, keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "<pad>"
lowercase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ), _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], "<s>" )
self.assertEqual(vocab_keys[1], "<pad>" )
self.assertEqual(len(_UpperCAmelCase ), 1008 )
def snake_case__ ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 1008 )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = XGLMTokenizer(_UpperCAmelCase, keep_accents=_UpperCAmelCase )
lowercase__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCAmelCase, ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
lowercase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCAmelCase, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
], )
lowercase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
], )
lowercase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
], )
@cached_property
def snake_case__ ( self ):
'''simple docstring'''
return XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
def snake_case__ ( self ):
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(_UpperCAmelCase, f.name )
lowercase__ = XGLMTokenizer(f.name, keep_accents=_UpperCAmelCase )
lowercase__ = pickle.dumps(_UpperCAmelCase )
pickle.loads(_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = "I was born in 92000, and this is falsé."
lowercase__ = tokenizer.tokenize(_UpperCAmelCase )
lowercase__ = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
lowercase__ = rust_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(_UpperCAmelCase )
lowercase__ = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
@slow
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "Hello World!"
lowercase__ = [2, 3_1227, 4447, 35]
self.assertListEqual(_UpperCAmelCase, self.big_tokenizer.encode(_UpperCAmelCase ) )
@slow
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
)
# fmt: off
lowercase__ = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(_UpperCAmelCase, self.big_tokenizer.encode(_UpperCAmelCase ) )
@slow
def snake_case__ ( self ):
'''simple docstring'''
        # fmt: off
        lowercase__ = {

"input_ids": [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase, model_name="facebook/xglm-564M", padding=_UpperCAmelCase, )
| 668 |
"""simple docstring"""
from collections import deque
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = process_name # process name
lowercase__ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
lowercase__ = arrival_time
lowercase__ = burst_time # remaining burst time
lowercase__ = 0 # total time of the process wait in ready queue
lowercase__ = 0 # time from arrival time to completion time
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = number_of_queues
# time slice of queues that round robin algorithm applied
lowercase__ = time_slices
# unfinished process is in this ready_queue
lowercase__ = queue
# current time
lowercase__ = current_time
# finished process is in this sequence queue
lowercase__ = deque()
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
return [q.burst_time for q in queue]
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = deque() # sequence deque of finished process
while len(_UpperCAmelCase ) != 0:
lowercase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_UpperCAmelCase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
lowercase__ = 0
# set the process's turnaround time because it is finished
lowercase__ = self.current_time - cp.arrival_time
# set the completion time
lowercase__ = self.current_time
# add the process to queue that has finished queue
finished.append(_UpperCAmelCase )
self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(_UpperCAmelCase ) ):
lowercase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_UpperCAmelCase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
lowercase__ = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(_UpperCAmelCase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
lowercase__ = 0
# set the finish time
lowercase__ = self.current_time
# update the process' turnaround time because it is finished
lowercase__ = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_UpperCAmelCase )
self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def snake_case__ ( self ):
'''simple docstring'''
for i in range(self.number_of_queues - 1 ):
lowercase__ , lowercase__ = self.round_robin(
self.ready_queue, self.time_slices[i] )
        # the last queue uses the first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
lowerCAmelCase_: Optional[int] = Process("P1", 0, 5_3)
lowerCAmelCase_: Union[str, Any] = Process("P2", 0, 1_7)
lowerCAmelCase_: str = Process("P3", 0, 6_8)
lowerCAmelCase_: int = Process("P4", 0, 2_4)
lowerCAmelCase_: Dict = 3
lowerCAmelCase_: Any = [1_7, 2_5]
lowerCAmelCase_: Tuple = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
lowerCAmelCase_: Any = Process("P1", 0, 5_3)
lowerCAmelCase_: Tuple = Process("P2", 0, 1_7)
lowerCAmelCase_: Optional[int] = Process("P3", 0, 6_8)
lowerCAmelCase_: List[Any] = Process("P4", 0, 2_4)
lowerCAmelCase_: Union[str, Any] = 3
lowerCAmelCase_: Any = [1_7, 2_5]
lowerCAmelCase_: Optional[Any] = deque([Pa, Pa, Pa, Pa])
lowerCAmelCase_: Union[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0)
lowerCAmelCase_: Tuple = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print sequence of finished processes
print(
F'sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'
)
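# Hand-checked expectation for this demo (all arrivals at t=0, slices [17, 25]):
# P2 (burst 17) finishes in the first round-robin pass, P4 in the second, and
# P1/P3 complete under FCFS, so the finish sequence should be P2, P4, P1, P3.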
| 668 | 1 |
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
lowerCAmelCase_: Any = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class a__ ( _a ):
def __init__( self, _UpperCAmelCase = 101 ):
'''simple docstring'''
lowercase__ = length
def __len__( self ):
'''simple docstring'''
return self.length
def __getitem__( self, _UpperCAmelCase ):
'''simple docstring'''
return i
class a__ :
def __call__( self, _UpperCAmelCase ):
'''simple docstring'''
return {"input_ids": torch.tensor(_UpperCAmelCase ), "labels": torch.tensor(_UpperCAmelCase )}
class a__ ( nn.Module ):
def __init__( self ):
'''simple docstring'''
super().__init__()
# Add some (unused) params otherwise DDP will complain.
lowercase__ = nn.Linear(120, 80 )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase=None ):
'''simple docstring'''
if labels is not None:
return torch.tensor(0.0, device=input_ids.device ), input_ids
else:
return input_ids
class a__ ( _a ):
@require_torch_neuroncore
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = F'''--nproc_per_node=2
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
'''.split()
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = F'''--output_dir {output_dir}'''.split()
lowercase__ = ["torchrun"] + distributed_args + args
execute_subprocess_async(_UpperCAmelCase, env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class a__ ( _a ):
@require_torch_multi_gpu
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = F'''--nproc_per_node={torch.cuda.device_count()}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
'''.split()
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = F'''--output_dir {output_dir}'''.split()
lowercase__ = ["torchrun"] + distributed_args + args
execute_subprocess_async(_UpperCAmelCase, env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
lowerCAmelCase_: List[str] = HfArgumentParser((TrainingArguments,))
lowerCAmelCase_: Union[str, Any] = parser.parse_args_into_dataclasses()[0]
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
F'distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [1_0_1, 4_0, 7]:
lowerCAmelCase_: List[str] = DummyDataset(dataset_length)
def __a ( A ):
'''simple docstring'''
lowercase__ = list(range(len(A ) ) )
lowercase__ = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
"Predictions and/or labels do not match expected results:\n - predictions: "
f'''{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}''' )
return {"success": success}
lowerCAmelCase_: List[str] = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
lowerCAmelCase_: str = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
lowerCAmelCase_: List[Any] = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
lowerCAmelCase_: List[str] = 2
lowerCAmelCase_: Union[str, Any] = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
lowerCAmelCase_: List[Any] = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
lowerCAmelCase_: str = None
| 668 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowerCAmelCase_: Dict = "pt"
elif is_tf_available():
lowerCAmelCase_: Dict = "tf"
else:
lowerCAmelCase_: str = "jax"
class a__ ( _a , unittest.TestCase ):
snake_case_ = ByTaTokenizer
snake_case_ = False
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
lowercase__ = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def snake_case__ ( self ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained("google/byt5-small" )
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase=False, _UpperCAmelCase=20, _UpperCAmelCase=5 ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
try:
lowercase__ = tokenizer.decode([i], clean_up_tokenization_spaces=_UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowercase__ = list(filter(lambda _UpperCAmelCase : re.match(R"^[ a-zA-Z]+$", t[1] ), _UpperCAmelCase ) )
lowercase__ = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1], add_special_tokens=_UpperCAmelCase ), _UpperCAmelCase ) )
if max_length is not None and len(_UpperCAmelCase ) > max_length:
lowercase__ = toks[:max_length]
if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0:
while len(_UpperCAmelCase ) < min_length:
lowercase__ = toks + toks
# toks_str = [t[1] for t in toks]
lowercase__ = [t[0] for t in toks]
# Ensure consistency
lowercase__ = tokenizer.decode(_UpperCAmelCase, clean_up_tokenization_spaces=_UpperCAmelCase )
if " " not in output_txt and len(_UpperCAmelCase ) > 1:
lowercase__ = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=_UpperCAmelCase )
+ " "
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=_UpperCAmelCase )
)
if with_prefix_space:
lowercase__ = " " + output_txt
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
return output_txt, output_ids
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"] )
lowercase__ = tokenizer(["hi", "I went to the gym", ""] )
self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = "Unicode €."
lowercase__ = tokenizer(_UpperCAmelCase )
lowercase__ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["input_ids"], _UpperCAmelCase )
# decoding
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, "Unicode €.</s>" )
lowercase__ = tokenizer("e è é ê ë" )
lowercase__ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["input_ids"], _UpperCAmelCase )
# decoding
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, "e è é ê ë</s>" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ), "e è é ê ë</s>" )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
if FRAMEWORK != "jax":
lowercase__ = list(batch.input_ids.numpy()[0] )
else:
lowercase__ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertEqual((2, 37), batch.input_ids.shape )
self.assertEqual((2, 37), batch.attention_mask.shape )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids", _UpperCAmelCase )
self.assertIn("attention_mask", _UpperCAmelCase )
self.assertNotIn("decoder_input_ids", _UpperCAmelCase )
self.assertNotIn("decoder_attention_mask", _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = [
"Summary of the text.",
"Another summary.",
]
lowercase__ = tokenizer(
text_target=_UpperCAmelCase, max_length=32, padding="max_length", truncation=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
self.assertEqual(32, targets["input_ids"].shape[1] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization. </s>"]
lowercase__ = ["Summary of the text. </s>"]
# fmt: off
lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
lowercase__ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
lowercase__ = tokenizer(_UpperCAmelCase, text_target=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, batch["input_ids"][0] )
self.assertEqual(_UpperCAmelCase, batch["labels"][0] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
lowercase__ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
lowercase__ = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(_UpperCAmelCase )
lowercase__ = [F'''<extra_id_{i}>''' for i in range(125 )]
lowercase__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
lowercase__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowercase__ = tokenizer_class.from_pretrained(
_UpperCAmelCase, )
self.assertIn(
"an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowercase__ = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=_UpperCAmelCase )]
lowercase__ = tokenizer_class.from_pretrained(
_UpperCAmelCase, additional_special_tokens=_UpperCAmelCase, )
self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ), )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer_class.from_pretrained(_UpperCAmelCase )
self.assertTrue(tokenizer.decode([255] ) == "" )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(fast=_UpperCAmelCase, do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
lowercase__ = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
lowercase__ = 0
lowercase__ = tokenizer.convert_ids_to_tokens(
_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase )
for attr in attributes_list:
setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase )
setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase )
setattr(_UpperCAmelCase, "additional_special_tokens_ids", [] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [] )
setattr(_UpperCAmelCase, "additional_special_tokens_ids", [token_id_to_test_setters] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [token_to_test_setters] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [token_id_to_test_setters] )
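# Byte-level intuition for the expected ids in the tests above: ByT5 maps each
# UTF-8 byte b to id b + 3 (offset for the pad=0, eos=1 and unk=2 specials), so
# "U" (byte 85) -> 88 and "." (byte 46) -> 49, matching vectors such as
# [88, 113, 108, ..., 49, 1], where the trailing 1 is the </s> token.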
| 668 | 1 |
"""simple docstring"""
def __a ( A , A , A=False ):
'''simple docstring'''
if isinstance(A , A ) and isinstance(A , A ):
lowercase__ = len(set_a.intersection(A ) )
if alternative_union:
lowercase__ = len(A ) + len(A )
else:
lowercase__ = len(set_a.union(A ) )
return intersection / union
if isinstance(A , (list, tuple) ) and isinstance(A , (list, tuple) ):
lowercase__ = [element for element in set_a if element in set_b]
if alternative_union:
lowercase__ = len(A ) + len(A )
return len(A ) / union
else:
lowercase__ = set_a + [element for element in set_b if element not in set_a]
return len(A ) / len(A )
return None
if __name__ == "__main__":
lowerCAmelCase_: List[str] = {"a", "b", "c", "d", "e"}
lowerCAmelCase_: Any = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
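    # For these inputs the printed value should be 3 / 8 = 0.375
    # (intersection {"c", "d", "e"}, union of 8 distinct elements).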
| 668 |
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class a__ ( unittest.TestCase ):
snake_case_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = hf_hub_download(
repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" )
lowercase__ = VideoClassificationPipeline(model=_UpperCAmelCase, image_processor=_UpperCAmelCase, top_k=2 )
lowercase__ = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
for example in examples:
lowercase__ = video_classifier(_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase, [
{"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )},
{"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )},
], )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
lowercase__ = VideoMAEFeatureExtractor(
size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10} )
lowercase__ = pipeline(
"video-classification", model=_UpperCAmelCase, feature_extractor=_UpperCAmelCase, frame_sampling_rate=4 )
lowercase__ = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" )
lowercase__ = video_classifier(_UpperCAmelCase, top_k=2 )
self.assertEqual(
nested_simplify(_UpperCAmelCase, decimals=4 ), [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], )
lowercase__ = video_classifier(
[
video_file_path,
video_file_path,
], top_k=2, )
self.assertEqual(
nested_simplify(_UpperCAmelCase, decimals=4 ), [
[{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
[{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
], )
@require_tf
def snake_case__ ( self ):
'''simple docstring'''
pass
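# Hedged usage sketch outside the test harness (the model id is an assumption;
# any video-classification checkpoint on the Hub should work):
#   classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
#   classifier("archery.mp4", top_k=2)  # -> [{"score": ..., "label": ...}, ...]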
| 668 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_: Tuple = {
"configuration_longformer": [
"LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LongformerConfig",
"LongformerOnnxConfig",
],
"tokenization_longformer": ["LongformerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: str = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: List[Any] = [
"LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongformerForMaskedLM",
"LongformerForMultipleChoice",
"LongformerForQuestionAnswering",
"LongformerForSequenceClassification",
"LongformerForTokenClassification",
"LongformerModel",
"LongformerPreTrainedModel",
"LongformerSelfAttention",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Optional[Any] = [
"TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLongformerForMaskedLM",
"TFLongformerForMultipleChoice",
"TFLongformerForQuestionAnswering",
"TFLongformerForSequenceClassification",
"TFLongformerForTokenClassification",
"TFLongformerModel",
"TFLongformerPreTrainedModel",
"TFLongformerSelfAttention",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
lowerCAmelCase_: Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
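# Pattern note: at runtime only the names listed in _import_structure are
# registered up front; _LazyModule defers the heavy submodule imports until
# first attribute access, while the TYPE_CHECKING branch keeps static type
# checkers accurate.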
| 668 |
"""simple docstring"""
import itertools
import math
def __a ( A ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __a ( ):
'''simple docstring'''
lowercase__ = 2
while True:
if is_prime(A ):
yield num
num += 1
def __a ( A = 1_00_01 ):
'''simple docstring'''
return next(itertools.islice(prime_generator() , nth - 1 , A ) )
if __name__ == "__main__":
print(F'{solution() = }')
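    # Expected output: solution() = 104743, the 10001st prime (Project Euler #7).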
| 668 | 1 |
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class a__ :
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
raise NotImplementedError()
def snake_case__ ( self ):
'''simple docstring'''
raise NotImplementedError()
class a__ ( _a ):
def __init__( self, _UpperCAmelCase, _UpperCAmelCase = False, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = tokenizer
lowercase__ = skip_prompt
lowercase__ = decode_kwargs
# variables used in the streaming process
lowercase__ = []
lowercase__ = 0
lowercase__ = True
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError("TextStreamer only supports batch size 1" )
elif len(value.shape ) > 1:
lowercase__ = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
lowercase__ = False
return
        # Add the new token to the cache and decode the entire thing.
self.token_cache.extend(value.tolist() )
lowercase__ = self.tokenizer.decode(self.token_cache, **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith("\n" ):
lowercase__ = text[self.print_len :]
lowercase__ = []
lowercase__ = 0
# If the last token is a CJK character, we print the characters.
elif len(_UpperCAmelCase ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
lowercase__ = text[self.print_len :]
self.print_len += len(_UpperCAmelCase )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
lowercase__ = text[self.print_len : text.rfind(" " ) + 1]
self.print_len += len(_UpperCAmelCase )
self.on_finalized_text(_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
if len(self.token_cache ) > 0:
lowercase__ = self.tokenizer.decode(self.token_cache, **self.decode_kwargs )
lowercase__ = text[self.print_len :]
lowercase__ = []
lowercase__ = 0
else:
lowercase__ = ""
lowercase__ = True
self.on_finalized_text(_UpperCAmelCase, stream_end=_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = False ):
'''simple docstring'''
print(_UpperCAmelCase, flush=_UpperCAmelCase, end="" if not stream_end else None )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
if (
(cp >= 0X4e00 and cp <= 0X9fff)
or (cp >= 0X3400 and cp <= 0X4dbf) #
or (cp >= 0X2_0000 and cp <= 0X2_a6df) #
or (cp >= 0X2_a700 and cp <= 0X2_b73f) #
or (cp >= 0X2_b740 and cp <= 0X2_b81f) #
or (cp >= 0X2_b820 and cp <= 0X2_ceaf) #
or (cp >= 0Xf900 and cp <= 0Xfaff)
or (cp >= 0X2_f800 and cp <= 0X2_fa1f) #
): #
return True
return False
class a__ ( _a ):
def __init__( self, _UpperCAmelCase, _UpperCAmelCase = False, _UpperCAmelCase = None, **_UpperCAmelCase ):
'''simple docstring'''
super().__init__(_UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase )
lowercase__ = Queue()
lowercase__ = None
lowercase__ = timeout
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = False ):
'''simple docstring'''
self.text_queue.put(_UpperCAmelCase, timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal, timeout=self.timeout )
def __iter__( self ):
'''simple docstring'''
return self
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
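# Hedged usage sketch (original transformers names TextStreamer and
# TextIteratorStreamer are assumed for the two classes above):
#   from threading import Thread
#   streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#   Thread(target=model.generate, kwargs={**inputs, "streamer": streamer}).start()
#   for chunk in streamer:  # iteration ends when the stop signal is queued
#       print(chunk, end="")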
| 668 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class a__ ( _a ):
def __init__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, _UpperCAmelCase = None, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
_UpperCAmelCase, split=_UpperCAmelCase, features=_UpperCAmelCase, cache_dir=_UpperCAmelCase, keep_in_memory=_UpperCAmelCase, streaming=_UpperCAmelCase, num_proc=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = path_or_paths if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else {self.split: path_or_paths}
lowercase__ = Text(
cache_dir=_UpperCAmelCase, data_files=_UpperCAmelCase, features=_UpperCAmelCase, **_UpperCAmelCase, )
def snake_case__ ( self ):
'''simple docstring'''
if self.streaming:
lowercase__ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = None
self.builder.download_and_prepare(
download_config=_UpperCAmelCase, download_mode=_UpperCAmelCase, verification_mode=_UpperCAmelCase, base_path=_UpperCAmelCase, num_proc=self.num_proc, )
lowercase__ = self.builder.as_dataset(
split=self.split, verification_mode=_UpperCAmelCase, in_memory=self.keep_in_memory )
return dataset
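# Context note (an inference from the imports above): this reader is the
# internal backend behind the public `load_dataset("text", data_files=...)` and
# `Dataset.from_text(...)` entry points in the datasets library.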
| 668 | 1 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class a__ ( _a , _a ):
@register_to_config
def __init__( self, _UpperCAmelCase = 768, ):
'''simple docstring'''
super().__init__()
lowercase__ = nn.Parameter(torch.zeros(1, _UpperCAmelCase ) )
lowercase__ = nn.Parameter(torch.ones(1, _UpperCAmelCase ) )
def snake_case__ ( self, _UpperCAmelCase = None, _UpperCAmelCase = None, ):
'''simple docstring'''
lowercase__ = nn.Parameter(self.mean.to(_UpperCAmelCase ).to(_UpperCAmelCase ) )
lowercase__ = nn.Parameter(self.std.to(_UpperCAmelCase ).to(_UpperCAmelCase ) )
return self
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = (embeds - self.mean) * 1.0 / self.std
return embeds
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = (embeds * self.std) + self.mean
return embeds
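# Round-trip property of the two methods above (scale then unscale, in the
# original naming): ((x - mean) / std) * std + mean == x, so un-normalizing a
# normalized embedding recovers the input exactly (up to float precision).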
| 668 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase_: List[str] = 1_6
lowerCAmelCase_: Optional[Any] = 3_2
def __a ( A , A = 16 , A = "bert-base-cased" ):
'''simple docstring'''
lowercase__ = AutoTokenizer.from_pretrained(A )
lowercase__ = load_dataset("glue" , "mrpc" )
def tokenize_function(A ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ = datasets.map(
A , batched=A , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=A )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(A ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A , padding="max_length" , max_length=1_28 , return_tensors="pt" )
return tokenizer.pad(A , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
lowercase__ = DataLoader(
tokenized_datasets["train"] , shuffle=A , collate_fn=A , batch_size=A )
lowercase__ = DataLoader(
tokenized_datasets["validation"] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
def __a ( A , A ):
'''simple docstring'''
lowercase__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ = config["lr"]
lowercase__ = int(config["num_epochs"] )
lowercase__ = int(config["seed"] )
lowercase__ = int(config["batch_size"] )
lowercase__ = args.model_name_or_path
set_seed(A )
lowercase__ , lowercase__ = get_dataloaders(A , A , A )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ = AutoModelForSequenceClassification.from_pretrained(A , return_dict=A )
# Instantiate optimizer
lowercase__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase__ = optimizer_cls(params=model.parameters() , lr=A )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
lowercase__ = 1
lowercase__ = (len(A ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase__ = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=0 , num_training_steps=A , )
else:
lowercase__ = DummyScheduler(A , total_num_steps=A , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
A , A , A , A , A )
# We need to keep track of how many total steps we have iterated over
lowercase__ = 0
# We also need to keep track of the stating epoch so files are named properly
lowercase__ = 0
# Now we train the model
lowercase__ = evaluate.load("glue" , "mrpc" )
lowercase__ = 0
lowercase__ = {}
for epoch in range(A , A ):
model.train()
for step, batch in enumerate(A ):
lowercase__ = model(**A )
lowercase__ = outputs.loss
lowercase__ = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
lowercase__ = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ = model(**A )
lowercase__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowercase__ , lowercase__ = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A ) - 1:
lowercase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowercase__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A , references=A , )
lowercase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , A )
lowercase__ = eval_metric["accuracy"]
if best_performance < eval_metric["accuracy"]:
lowercase__ = eval_metric["accuracy"]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f:
json.dump(A , A )
def __a ( ):
'''simple docstring'''
    lowercase__ = argparse.ArgumentParser(description="Simple example of a training script that tracks the best evaluation accuracy." )
parser.add_argument(
"--model_name_or_path" , type=A , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=A , )
parser.add_argument(
"--output_dir" , type=A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--performance_lower_bound" , type=A , default=A , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , )
parser.add_argument(
"--num_epochs" , type=A , default=3 , help="Number of train epochs." , )
lowercase__ = parser.parse_args()
lowercase__ = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(A , A )
if __name__ == "__main__":
main()
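    # Hedged launch sketch (file name and DeepSpeed config are assumptions):
    #   accelerate launch --config_file deepspeed_config.yaml this_script.py \
    #       --model_name_or_path bert-base-cased --output_dir outputs --num_epochs 3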
| 668 | 1 |
"""simple docstring"""
def __a ( A , A ):
'''simple docstring'''
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
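# Worked example (argument order (density, bulk_modulus) is assumed from the
# checks above): water at ~998 kg/m^3 with a bulk modulus of ~2.15e9 Pa gives
# (2.15e9 / 998) ** 0.5 ≈ 1468 m/s, close to the measured ~1480 m/s.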
if __name__ == "__main__":
import doctest
doctest.testmod()
| 668 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a__ ( _a ):
snake_case_ = (IPNDMScheduler,)
snake_case_ = (("num_inference_steps", 50),)
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = {"num_train_timesteps": 1000}
config.update(**_UpperCAmelCase )
return config
def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config(**_UpperCAmelCase )
lowercase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
if time_step is None:
lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase )
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
if time_step is None:
lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**_UpperCAmelCase )
lowercase__ = scheduler_class(**_UpperCAmelCase )
lowercase__ = 10
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample
return sample
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(_UpperCAmelCase, "set_timesteps" ):
scheduler.set_timesteps(_UpperCAmelCase )
elif num_inference_steps is not None and not hasattr(_UpperCAmelCase, "set_timesteps" ):
lowercase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.timesteps[5]
lowercase__ = scheduler.timesteps[6]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def snake_case__ ( self ):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase, time_step=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ):
self.check_over_forward(num_inference_steps=_UpperCAmelCase, time_step=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.full_loop()
lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_mean.item() - 254_0529 ) < 10
| 668 | 1 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCAmelCase_: int = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption( parser ):
    '''simple docstring'''
    from diffusers.utils.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary( terminalreporter ):
    '''simple docstring'''
    from diffusers.utils.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
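# For illustration, assuming the shared diffusers testing helpers: a run such as
#   pytest --make-reports=run1 tests/
# makes the summary hook above emit per-run report files tagged "run1".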
| 668 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( _a , unittest.TestCase ):
snake_case_ = MgpstrTokenizer
snake_case_ = False
snake_case_ = {}
snake_case_ = False
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
# fmt: off
lowercase__ = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
lowercase__ = dict(zip(_UpperCAmelCase, range(len(_UpperCAmelCase ) ) ) )
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + "\n" )
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = "tester"
lowercase__ = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
lowercase__ = tokenizer.encode([special_token], add_special_tokens=_UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ), 1 )
lowercase__ = tokenizer.decode(_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase )
self.assertTrue(special_token not in decoded )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ , lowercase__ = self.get_input_output_texts(_UpperCAmelCase )
lowercase__ = tokenizer.tokenize(_UpperCAmelCase )
lowercase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertNotEqual(len(_UpperCAmelCase ), 0 )
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
self.assertEqual(text_a.replace(" ", "" ), _UpperCAmelCase )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def snake_case__ ( self ):
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def snake_case__ ( self ):
'''simple docstring'''
pass
| 668 | 1 |
"""simple docstring"""
from __future__ import annotations
def __a ( nums ):
    '''simple docstring'''
    if len(nums ) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
    if any(i <= 0 for i in nums ):
        raise ValueError("All values must be greater than 0" )
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
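# Behaviour sketch for the check above: sorted([6, 10, 5]) ends in 10 and
# 10 < 6 + 5, so it returns True; [3, 7, 13, 2] returns False since 13 >= 3 + 7 + 2.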
| 668 |
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 668 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class a__ ( unittest.TestCase ):
def __init__( self, _UpperCAmelCase, _UpperCAmelCase=7, _UpperCAmelCase=3, _UpperCAmelCase=18, _UpperCAmelCase=30, _UpperCAmelCase=400, _UpperCAmelCase=True, _UpperCAmelCase=32, _UpperCAmelCase=True, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size_divisor
lowercase__ = do_rescale
def snake_case__ ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class a__ ( _a , unittest.TestCase ):
snake_case_ = GLPNImageProcessor if is_vision_available() else None
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = GLPNImageProcessingTester(self )
@property
def snake_case__ ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase, "do_resize" ) )
self.assertTrue(hasattr(_UpperCAmelCase, "size_divisor" ) )
self.assertTrue(hasattr(_UpperCAmelCase, "resample" ) )
self.assertTrue(hasattr(_UpperCAmelCase, "do_rescale" ) )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase, Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=_UpperCAmelCase, numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase, np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=_UpperCAmelCase, torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase, torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 668 |
"""simple docstring"""
from typing import Any
import numpy as np
def is_hermitian( A ):
    '''simple docstring'''
    return np.array_equal(A , A.conjugate().T )
def rayleigh_quotient( A , v ):
    '''simple docstring'''
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(A )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))
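# For a Hermitian matrix M and a nonzero column vector v this computes
# R(M, v) = (v* M v) / (v* v); with M = I it is 1 for every v, e.g.
# rayleigh_quotient(np.eye(2), np.array([[1], [1]])) evaluates to [[1.0]].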
def tests( ):
    '''simple docstring'''
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), f'''{a} is not hermitian.'''
    print(rayleigh_quotient(a , v ) )
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), f'''{a} is not hermitian.'''
    assert rayleigh_quotient(a , v ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 668 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_: str = logging.get_logger(__name__)
lowerCAmelCase_: Optional[int] = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class a__ ( _a ):
snake_case_ = "levit"
def __init__( self, _UpperCAmelCase=224, _UpperCAmelCase=3, _UpperCAmelCase=3, _UpperCAmelCase=2, _UpperCAmelCase=1, _UpperCAmelCase=16, _UpperCAmelCase=[128, 256, 384], _UpperCAmelCase=[4, 8, 12], _UpperCAmelCase=[4, 4, 4], _UpperCAmelCase=[16, 16, 16], _UpperCAmelCase=0, _UpperCAmelCase=[2, 2, 2], _UpperCAmelCase=[2, 2, 2], _UpperCAmelCase=0.02, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
lowercase__ = image_size
lowercase__ = num_channels
lowercase__ = kernel_size
lowercase__ = stride
lowercase__ = padding
lowercase__ = hidden_sizes
lowercase__ = num_attention_heads
lowercase__ = depths
lowercase__ = key_dim
lowercase__ = drop_path_rate
lowercase__ = patch_size
lowercase__ = attention_ratio
lowercase__ = mlp_ratio
lowercase__ = initializer_range
lowercase__ = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class a__ ( _a ):
snake_case_ = version.parse("1.11" )
@property
def snake_case__ ( self ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def snake_case__ ( self ):
'''simple docstring'''
return 1E-4
| 668 |
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class a__ ( _a , unittest.TestCase ):
snake_case_ = PriorTransformer
snake_case_ = "hidden_states"
@property
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = 4
lowercase__ = 8
lowercase__ = 7
lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def snake_case__ ( self, _UpperCAmelCase=0 ):
'''simple docstring'''
torch.manual_seed(_UpperCAmelCase )
lowercase__ = 4
lowercase__ = 8
lowercase__ = 7
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def snake_case__ ( self ):
'''simple docstring'''
return (4, 8)
@property
def snake_case__ ( self ):
'''simple docstring'''
return (4, 8)
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = {
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_layers": 2,
"embedding_dim": 8,
"num_embeddings": 7,
"additional_embeddings": 4,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = PriorTransformer.from_pretrained(
"hf-internal-testing/prior-dummy", output_loading_info=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertEqual(len(loading_info["missing_keys"] ), 0 )
model.to(_UpperCAmelCase )
lowercase__ = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.prepare_init_args_and_inputs_for_common()
lowercase__ = self.model_class(**_UpperCAmelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["hidden_states", "timestep"]
self.assertListEqual(arg_names[:2], _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" )
lowercase__ = model.to(_UpperCAmelCase )
if hasattr(_UpperCAmelCase, "set_default_attn_processor" ):
model.set_default_attn_processor()
lowercase__ = self.get_dummy_seed_input()
with torch.no_grad():
lowercase__ = model(**_UpperCAmelCase )[0]
lowercase__ = output[0, :5].flatten().cpu()
print(_UpperCAmelCase )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
lowercase__ = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] )
self.assertTrue(torch_all_close(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-2 ) )
@slow
class a__ ( unittest.TestCase ):
def snake_case__ ( self, _UpperCAmelCase=1, _UpperCAmelCase=768, _UpperCAmelCase=77, _UpperCAmelCase=0 ):
'''simple docstring'''
torch.manual_seed(_UpperCAmelCase )
lowercase__ = batch_size
lowercase__ = embedding_dim
lowercase__ = num_embeddings
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def snake_case__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]],
[37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]],
# fmt: on
] )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior" )
model.to(_UpperCAmelCase )
lowercase__ = self.get_dummy_seed_input(seed=_UpperCAmelCase )
with torch.no_grad():
lowercase__ = model(**_UpperCAmelCase )[0]
assert list(sample.shape ) == [1, 768]
lowercase__ = sample[0, :8].flatten().cpu()
print(_UpperCAmelCase )
lowercase__ = torch.tensor(_UpperCAmelCase )
assert torch_all_close(_UpperCAmelCase, _UpperCAmelCase, atol=1E-3 )
| 668 | 1 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
    _torch_available = True
except ImportError:
    _torch_available = False
try:
from torch.hub import _get_torch_home
    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
lowerCAmelCase_: Any = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
lowerCAmelCase_: Any = "pytorch_model.bin"
lowerCAmelCase_: Optional[int] = "config.yaml"
def __a ( objs=OBJECTS , attrs=ATTRIBUTES ):
    '''simple docstring'''
    vg_classes = []
    with open(objs ) as f:
        for object in f.readlines():
            vg_classes.append(object.split("," )[0].lower().strip() )
    vg_attrs = []
    with open(attrs ) as f:
        for object in f.readlines():
            vg_attrs.append(object.split("," )[0].lower().strip() )
    return vg_classes, vg_attrs
def __a ( A ):
    '''simple docstring'''
    r = OrderedDict()
    with open(A , "rb" ) as f:
        ckp = pkl.load(f )["model"]
    for k in copy.deepcopy(list(ckp.keys() ) ):
        v = ckp.pop(k )
        if isinstance(v , np.ndarray ):
            v = torch.tensor(v )
        else:
            assert isinstance(v , torch.Tensor ), type(v )
        r[k] = v
    return r
class a__ :
snake_case_ = {}
def __init__( self, _UpperCAmelCase, _UpperCAmelCase = "root", _UpperCAmelCase=0 ):
'''simple docstring'''
lowercase__ = name
lowercase__ = level
lowercase__ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
lowercase__ = copy.deepcopy(_UpperCAmelCase )
lowercase__ = copy.deepcopy(_UpperCAmelCase )
if isinstance(_UpperCAmelCase, _UpperCAmelCase ):
lowercase__ = Config(_UpperCAmelCase, name=_UpperCAmelCase, level=level + 1 )
lowercase__ = v
setattr(self, _UpperCAmelCase, _UpperCAmelCase )
lowercase__ = d
def __repr__( self ):
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = val
lowercase__ = val
lowercase__ = key.split("." )
lowercase__ = len(_UpperCAmelCase ) - 1
lowercase__ = self._pointer
if len(_UpperCAmelCase ) > 1:
for i, l in enumerate(_UpperCAmelCase ):
if hasattr(self, _UpperCAmelCase ) and isinstance(getattr(self, _UpperCAmelCase ), _UpperCAmelCase ):
setattr(getattr(self, _UpperCAmelCase ), ".".join(levels[i:] ), _UpperCAmelCase )
if l == last_level:
lowercase__ = val
else:
lowercase__ = pointer[l]
def snake_case__ ( self ):
'''simple docstring'''
return self._pointer
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
with open(F'''{file_name}''', "w" ) as stream:
dump(_UpperCAmelCase, _UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
with open(F'''{file_name}''', "w" ) as stream:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
@staticmethod
def snake_case__ ( _UpperCAmelCase ):
'''simple docstring'''
with open(_UpperCAmelCase ) as stream:
lowercase__ = load(_UpperCAmelCase, Loader=_UpperCAmelCase )
return data
def __str__( self ):
'''simple docstring'''
lowercase__ = " "
if self._name != "root":
lowercase__ = F'''{t * (self._level-1)}{self._name}:\n'''
else:
lowercase__ = ""
lowercase__ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(_UpperCAmelCase, _UpperCAmelCase ):
r += F'''{t * (self._level)}{v}\n'''
self._level += 1
else:
r += F'''{t * (self._level)}{k}: {v} ({type(_UpperCAmelCase ).__name__})\n'''
lowercase__ = level
return r[:-1]
@classmethod
def snake_case__ ( cls, _UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ , lowercase__ = cls.get_config_dict(_UpperCAmelCase, **_UpperCAmelCase )
return cls(_UpperCAmelCase )
@classmethod
def snake_case__ ( cls, _UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = kwargs.pop("cache_dir", _UpperCAmelCase )
lowercase__ = kwargs.pop("force_download", _UpperCAmelCase )
lowercase__ = kwargs.pop("resume_download", _UpperCAmelCase )
lowercase__ = kwargs.pop("proxies", _UpperCAmelCase )
lowercase__ = kwargs.pop("local_files_only", _UpperCAmelCase )
if os.path.isdir(_UpperCAmelCase ):
lowercase__ = os.path.join(_UpperCAmelCase, _UpperCAmelCase )
elif os.path.isfile(_UpperCAmelCase ) or is_remote_url(_UpperCAmelCase ):
lowercase__ = pretrained_model_name_or_path
else:
lowercase__ = hf_bucket_url(_UpperCAmelCase, filename=_UpperCAmelCase, use_cdn=_UpperCAmelCase )
try:
# Load from URL or cache if already cached
lowercase__ = cached_path(
_UpperCAmelCase, cache_dir=_UpperCAmelCase, force_download=_UpperCAmelCase, proxies=_UpperCAmelCase, resume_download=_UpperCAmelCase, local_files_only=_UpperCAmelCase, )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
lowercase__ = Config.load_yaml(_UpperCAmelCase )
except EnvironmentError:
lowercase__ = "Can't load config for"
raise EnvironmentError(_UpperCAmelCase )
if resolved_config_file == config_file:
print("loading configuration file from path" )
else:
print("loading configuration file cache" )
return Config.load_yaml(_UpperCAmelCase ), kwargs
def __a ( in_tensor ):
    '''simple docstring'''
    out_tensor = torch.load("dump.pt" , map_location=in_tensor.device )
    na = in_tensor.numpy()
    nb = out_tensor.numpy()[0]
    print(na.shape , na[0, 0, :5] )
    print(nb.shape , nb[0, 0, :5] )
    assert np.allclose(na , nb , rtol=0.01 , atol=0.1 ), (
        f'''{sum([1 for x in np.isclose(na , nb , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*1_00:.4f} %'''
        " element-wise mismatch"
    )
    raise Exception("tensors are all good" )
# Hugging face functions below
def is_remote_url( A ):
    '''simple docstring'''
    parsed = urlparse(A )
    return parsed.scheme in ("http", "https")
def hf_bucket_url( model_id , filename , use_cdn=True ):
    '''simple docstring'''
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f'''{endpoint}/{model_id}-{filename}'''
    else:
        return f'''{endpoint}/{model_id}/{filename}'''
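# For illustration: a bare model id such as "bert-base-uncased" maps to
# "<endpoint>/bert-base-uncased-<filename>", while a namespaced id such as
# "user/model" maps to "<endpoint>/user/model/<filename>".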
def http_get( url , temp_file , proxies=None , resume_size=0 , user_agent=None , ):
    '''simple docstring'''
    ua = "python/{}".format(sys.version.split()[0] )
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__ )
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join("{}/{}".format(k , v ) for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url , stream=True , proxies=proxies , headers=headers )
    if response.status_code == 4_16:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length" )
    total = resume_size + int(content_length ) if content_length is not None else None
    progress = tqdm(
        unit="B" , unit_scale=True , total=total , initial=resume_size , desc="Downloading" , )
    for chunk in response.iter_content(chunk_size=10_24 ):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk ) )
            temp_file.write(chunk )
    progress.close()
def get_from_cache( url , cache_dir=None , force_download=False , proxies=None , etag_timeout=10 , resume_download=False , user_agent=None , local_files_only=False , ):
    '''simple docstring'''
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir , Path ):
        cache_dir = str(cache_dir )
    os.makedirs(cache_dir , exist_ok=True )
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url , allow_redirects=True , proxies=proxies , timeout=etag_timeout )
            if response.status_code == 2_00:
                etag = response.headers.get("ETag" )
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url , etag )
    # get cache path to put the file
    cache_path = os.path.join(cache_dir , filename )
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path ):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir ) , filename + ".*" )
                if not file.endswith(".json" ) and not file.endswith(".lock" )
            ]
            if len(matching_files ) > 0:
                return os.path.join(cache_dir , matching_files[-1] )
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False." )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path ) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path ):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path ) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + ".incomplete"
            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path , "a+b" ) as f:
                    yield f
            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path ):
                resume_size = os.stat(incomplete_path ).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile , dir=cache_dir , delete=False )
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s" % (url , temp_file.name ) )
            http_get(
                url , temp_file , proxies=proxies , resume_size=resume_size , user_agent=user_agent , )
        os.replace(temp_file.name , cache_path )
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path , "w" ) as meta_file:
            json.dump(meta , meta_file )
    return cache_path
def url_to_filename( url , etag=None ):
    '''simple docstring'''
    url_bytes = url.encode("utf-8" )
    url_hash = shaaaa(url_bytes )
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8" )
        etag_hash = shaaaa(etag_bytes )
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5" ):
        filename += ".h5"
    return filename
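# The resulting cache key has the shape "<hash(url)>.<hash(etag)>" (plus a ".h5"
# suffix for TF weights), so a changed server-side ETag produces a fresh cache
# entry instead of overwriting the previous download.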
def cached_path( url_or_filename , cache_dir=None , force_download=False , proxies=None , resume_download=False , user_agent=None , extract_compressed_file=False , force_extract=False , local_files_only=False , ):
    '''simple docstring'''
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename , Path ):
        url_or_filename = str(url_or_filename )
    if isinstance(cache_dir , Path ):
        cache_dir = str(cache_dir )
    if is_remote_url(url_or_filename ):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , user_agent=user_agent , local_files_only=local_files_only , )
    elif os.path.exists(url_or_filename ):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename ).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename ) )
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename ) )
    if extract_compressed_file:
        if not is_zipfile(output_path ) and not tarfile.is_tarfile(output_path ):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path )
        output_extract_dir_name = output_file.replace("." , "-" ) + "-extracted"
        output_path_extracted = os.path.join(output_dir , output_extract_dir_name )
        if os.path.isdir(output_path_extracted ) and os.listdir(output_path_extracted ) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path ):
            shutil.rmtree(output_path_extracted , ignore_errors=True )
            os.makedirs(output_path_extracted )
            if is_zipfile(output_path ):
                with ZipFile(output_path , "r" ) as zip_file:
                    zip_file.extractall(output_path_extracted )
                    zip_file.close()
            elif tarfile.is_tarfile(output_path ):
                tar_file = tarfile.open(output_path )
                tar_file.extractall(output_path_extracted )
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path ) )
        return output_path_extracted
    return output_path
def __a ( A , A="," ):
'''simple docstring'''
assert isinstance(A , A )
if os.path.isfile(A ):
with open(A ) as f:
lowercase__ = eval(f.read() )
else:
lowercase__ = requests.get(A )
try:
lowercase__ = requests.json()
except Exception:
lowercase__ = req.content.decode()
assert data is not None, "could not connect"
try:
lowercase__ = eval(A )
except Exception:
lowercase__ = data.split("\n" )
req.close()
return data
def get_image_from_url( A ):
    '''simple docstring'''
    response = requests.get(A )
    img = np.array(Image.open(BytesIO(response.content ) ) )
    return img
def __a ( A ):
    '''simple docstring'''
    fn = A.split("/" )[-1]
    if fn not in os.listdir(os.getcwd() ):
        wget.download(A )
    with open(fn , "rb" ) as stream:
        weights = pkl.load(stream )
    model = weights.pop("model" )
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v )
        if "running_var" in k:
            zero = torch.tensor([0] )
            k_new = k.replace("running_var" , "num_batches_tracked" )
            new[k_new] = zero
    return new
def __a ( ):
'''simple docstring'''
print(f'''{os.path.abspath(os.path.join(A , os.pardir ) )}/demo.ipynb''' )
def __a ( A , A="RGB" ):
'''simple docstring'''
assert isinstance(A , A )
if os.path.isfile(A ):
lowercase__ = cva.imread(A )
else:
lowercase__ = get_image_from_url(A )
assert img is not None, f'''could not connect to: {im}'''
lowercase__ = cva.cvtColor(A , cva.COLOR_BGR2RGB )
if input_format == "RGB":
lowercase__ = img[:, :, ::-1]
return img
def __a ( images , batch=1 ):
    '''simple docstring'''
    return (images[i : i + batch] for i in range(0 , len(images ) , batch ))
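# e.g. a batch size of 2 over 5 images yields the slices
# [img0, img1], [img2, img3], [img4]; the final chunk may be shorter.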
| 668 |
"""simple docstring"""
lowerCAmelCase_: Any = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def __a ( A ):
    '''simple docstring'''
    if not isinstance(A , bytes ):
        msg = f'''a bytes-like object is required, not \'{A.__class__.__name__}\''''
        raise TypeError(msg )
    binary_stream = "".join(bin(byte )[2:].zfill(8 ) for byte in A )
    padding_needed = len(binary_stream ) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream ) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream ) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
            for index in range(0 , len(binary_stream ) , 6 ) ).encode()
        + padding
    )
def __a ( A ):
    '''simple docstring'''
    if not isinstance(A , bytes ) and not isinstance(A , str ):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f'''not \'{A.__class__.__name__}\''''
        )
        raise TypeError(msg )
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(A , bytes ):
        try:
            encoded_data = A.decode("utf-8" )
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters" )
    else:
        encoded_data = A
    padding = encoded_data.count("=" )
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data ) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )
    decoded_data = [
        int(binary_stream[index : index + 8] , 2 )
        for index in range(0 , len(binary_stream ) , 8 )
    ]
    return bytes(decoded_data )
if __name__ == "__main__":
import doctest
doctest.testmod()
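# Round-trip sketch; the behaviour matches the standard library's base64 module:
#   encoding b"Hello" gives b"SGVsbG8=", and decoding "SGVsbG8=" gives b"Hello".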
| 668 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class a__ ( _a ):
def __init__( self, _UpperCAmelCase, _UpperCAmelCase=13, _UpperCAmelCase=7, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase=False, _UpperCAmelCase=True, _UpperCAmelCase=99, _UpperCAmelCase=32, _UpperCAmelCase=5, _UpperCAmelCase=4, _UpperCAmelCase=37, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=16, _UpperCAmelCase=2, _UpperCAmelCase=0.02, _UpperCAmelCase=3, _UpperCAmelCase=4, _UpperCAmelCase=None, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowercase__ = ids_tensor([self.batch_size], self.num_choices )
lowercase__ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self ):
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = DistilBertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = DistilBertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = DistilBertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(
_UpperCAmelCase, attention_mask=_UpperCAmelCase, start_positions=_UpperCAmelCase, end_positions=_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = DistilBertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = DistilBertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.num_choices
lowercase__ = DistilBertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowercase__ = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowercase__ = model(
_UpperCAmelCase, attention_mask=_UpperCAmelCase, labels=_UpperCAmelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) = config_and_inputs
lowercase__ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class a__ ( _a , _a , unittest.TestCase ):
snake_case_ = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
snake_case_ = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = True
snake_case_ = True
snake_case_ = True
snake_case_ = True
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = DistilBertModelTester(self )
lowercase__ = ConfigTester(self, config_class=_UpperCAmelCase, dim=37 )
def snake_case__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*_UpperCAmelCase )
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = DistilBertModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@slow
@require_torch_gpu
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
lowercase__ = True
lowercase__ = model_class(config=_UpperCAmelCase )
lowercase__ = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = torch.jit.trace(
_UpperCAmelCase, (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_UpperCAmelCase, os.path.join(_UpperCAmelCase, "traced_model.pt" ) )
lowercase__ = torch.jit.load(os.path.join(_UpperCAmelCase, "traced_model.pt" ), map_location=_UpperCAmelCase )
loaded(inputs_dict["input_ids"].to(_UpperCAmelCase ), inputs_dict["attention_mask"].to(_UpperCAmelCase ) )
@require_torch
class a__ ( unittest.TestCase ):
@slow
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = DistilBertModel.from_pretrained("distilbert-base-uncased" )
lowercase__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowercase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase__ = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase )[0]
lowercase__ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape, _UpperCAmelCase )
lowercase__ = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4], _UpperCAmelCase, atol=1E-4 ) )
| 668 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __a ( A , A , A = "x" , A = 10**-10 , A = 1 , ):
'''simple docstring'''
lowercase__ = symbols(A )
lowercase__ = lambdify(A , A )
lowercase__ = lambdify(A , diff(A , A ) )
lowercase__ = starting_point
while True:
if diff_function(A ) != 0:
lowercase__ = prev_guess - multiplicity * func(A ) / diff_function(
A )
else:
raise ZeroDivisionError("Could not find root" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
lowercase__ = next_guess
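# One illustrative update, assuming f(x) = x**2 - 4, a start of 3 and multiplicity 1:
#   x1 = 3 - (3**2 - 4) / (2 * 3) = 3 - 5 / 6 ~= 2.1667, converging toward the root 2.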
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(F'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}')
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
F'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
F'{newton_raphson("exp(x) - 1", 1_0, precision=0.005)}',
)
# Find root of cos(x)
print(F'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
| 668 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: Tuple = logging.get_logger(__name__)
lowerCAmelCase_: Tuple = {
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class a__ ( _a ):
snake_case_ = "decision_transformer"
snake_case_ = ["past_key_values"]
snake_case_ = {
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self, _UpperCAmelCase=17, _UpperCAmelCase=4, _UpperCAmelCase=128, _UpperCAmelCase=4096, _UpperCAmelCase=True, _UpperCAmelCase=1, _UpperCAmelCase=1024, _UpperCAmelCase=3, _UpperCAmelCase=1, _UpperCAmelCase=None, _UpperCAmelCase="relu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=1E-5, _UpperCAmelCase=0.02, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase=5_0256, _UpperCAmelCase=5_0256, _UpperCAmelCase=False, _UpperCAmelCase=False, **_UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = state_dim
lowercase__ = act_dim
lowercase__ = hidden_size
lowercase__ = max_ep_len
lowercase__ = action_tanh
lowercase__ = vocab_size
lowercase__ = n_positions
lowercase__ = n_layer
lowercase__ = n_head
lowercase__ = n_inner
lowercase__ = activation_function
lowercase__ = resid_pdrop
lowercase__ = embd_pdrop
lowercase__ = attn_pdrop
lowercase__ = layer_norm_epsilon
lowercase__ = initializer_range
lowercase__ = scale_attn_weights
lowercase__ = use_cache
lowercase__ = scale_attn_by_inverse_layer_idx
lowercase__ = reorder_and_upcast_attn
lowercase__ = bos_token_id
lowercase__ = eos_token_id
super().__init__(bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, **_UpperCAmelCase )
| 668 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_: Union[str, Any] = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Union[str, Any] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Any = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Tuple = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Optional[Any] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase_: Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
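# With _LazyModule in place, importing this package stays cheap: the torch, TF and
# flax modeling files listed above are only loaded on first attribute access (e.g.
# the first time DistilBertModel is actually looked up on the module).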
| 668 | 1 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key( key , ciphertext ):
    '''simple docstring'''
    decoded = ""
    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
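# XOR with the same key is self-inverse, (c ^ k) ^ k == c, so a correct key guess
# reproduces the plaintext exactly; any byte outside VALID_INTS rejects the key early.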
def filter_valid_chars( ciphertext ):
    '''simple docstring'''
    possibles = []
    for key in product(LOWERCASE_INTS , repeat=3 ):
        encoded = try_key(key , ciphertext )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def filter_common_word( possibles , common_word ):
    '''simple docstring'''
    return [possible for possible in possibles if common_word in possible.lower()]
def __a ( A = "p059_cipher.txt" ):
'''simple docstring'''
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
lowercase__ = Path(A ).parent.joinpath(A ).read_text(encoding="utf-8" )
lowercase__ = [int(A ) for number in data.strip().split("," )]
lowercase__ = filter_valid_chars(A )
for common_word in COMMON_WORDS:
lowercase__ = filter_common_word(A , A )
if len(A ) == 1:
break
lowercase__ = possibles[0]
return sum(ord(A ) for char in decoded_text )
if __name__ == "__main__":
print(F'{solution() = }')
| 668 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase_: Union[str, Any] = logging.get_logger(__name__)
class a__ ( _a ):
snake_case_ = ["audio_values", "audio_mask"]
def __init__( self, _UpperCAmelCase=2048, _UpperCAmelCase=1, _UpperCAmelCase=[16, 16], _UpperCAmelCase=128, _UpperCAmelCase=4_4100, _UpperCAmelCase=86, _UpperCAmelCase=2048, _UpperCAmelCase=0.0, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
feature_size=_UpperCAmelCase, sampling_rate=_UpperCAmelCase, padding_value=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = spectrogram_length
lowercase__ = num_channels
lowercase__ = patch_size
lowercase__ = feature_size // self.patch_size[1]
lowercase__ = n_fft
lowercase__ = sampling_rate // hop_length_to_sampling_rate
lowercase__ = sampling_rate
lowercase__ = padding_value
lowercase__ = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2, num_mel_filters=_UpperCAmelCase, min_frequency=0.0, max_frequency=22_050.0, sampling_rate=_UpperCAmelCase, norm="slaney", mel_scale="slaney", ).T
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = spectrogram(
_UpperCAmelCase, window_function(self.n_fft, "hann" ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0, )
lowercase__ = log_spec[:, :-1]
lowercase__ = log_spec - 20.0
lowercase__ = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0
return log_spec
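    # The shift and clip above normalize the dB-scaled mel spectrogram into [-1.0, 1.0].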
def __call__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = True, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, **_UpperCAmelCase, ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
F''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowercase__ = isinstance(_UpperCAmelCase, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
lowercase__ = is_batched_numpy or (
isinstance(_UpperCAmelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_UpperCAmelCase, np.ndarray ):
lowercase__ = np.asarray(_UpperCAmelCase, dtype=np.floataa )
elif isinstance(_UpperCAmelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowercase__ = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0], _UpperCAmelCase ):
lowercase__ = [np.asarray(_UpperCAmelCase, dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowercase__ = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowercase__ = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
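            # Each mask marks real audio patches with 1s and right-pads with 0s up to the
            # batch-wide maximum patch count computed above.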
lowercase__ = np.array(_UpperCAmelCase ).astype(np.floataa )
# convert into correct format for padding
lowercase__ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowercase__ = np.ones([len(_UpperCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
lowercase__ = padded_audio_features * self.padding_value
for i in range(len(_UpperCAmelCase ) ):
lowercase__ = audio_features[i]
lowercase__ = feature
# return as BatchFeature
if return_attention_mask:
lowercase__ = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
lowercase__ = {"audio_values": padded_audio_features}
lowercase__ = BatchFeature(data=_UpperCAmelCase, tensor_type=_UpperCAmelCase )
return encoded_inputs
| 668 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class a__ :
def __init__( self, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = 13
lowercase__ = 7
lowercase__ = True
lowercase__ = True
lowercase__ = True
lowercase__ = 99
lowercase__ = 32
lowercase__ = 2
lowercase__ = 4
lowercase__ = 37
lowercase__ = "gelu"
lowercase__ = 0.1
lowercase__ = 0.1
lowercase__ = 512
lowercase__ = 16
lowercase__ = 2
lowercase__ = 0.02
lowercase__ = 3
lowercase__ = 4
lowercase__ = None
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowercase__ = ids_tensor([self.batch_size], self.num_choices )
lowercase__ = EsmConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self ):
'''simple docstring'''
        lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.prepare_config_and_inputs()
lowercase__ = True
lowercase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase__ = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = TFEsmModel(config=_UpperCAmelCase )
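        # Exercise the model with both a dict of named tensors and a positional list of
        # inputs to confirm that either TF calling convention is accepted.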
lowercase__ = {"input_ids": input_ids, "attention_mask": input_mask}
lowercase__ = model(_UpperCAmelCase )
lowercase__ = [input_ids, input_mask]
lowercase__ = model(_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = True
lowercase__ = TFEsmModel(config=_UpperCAmelCase )
lowercase__ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"encoder_hidden_states": encoder_hidden_states,
"encoder_attention_mask": encoder_attention_mask,
}
lowercase__ = model(_UpperCAmelCase )
lowercase__ = [input_ids, input_mask]
lowercase__ = model(_UpperCAmelCase, encoder_hidden_states=_UpperCAmelCase )
# Also check the case where encoder outputs are not passed
lowercase__ = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = TFEsmForMaskedLM(config=_UpperCAmelCase )
lowercase__ = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = TFEsmForTokenClassification(config=_UpperCAmelCase )
lowercase__ = {"input_ids": input_ids, "attention_mask": input_mask}
lowercase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
        lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class a__ ( _a , _a , unittest.TestCase ):
snake_case_ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case_ = (
{
"feature-extraction": TFEsmModel,
"fill-mask": TFEsmForMaskedLM,
"text-classification": TFEsmForSequenceClassification,
"token-classification": TFEsmForTokenClassification,
"zero-shot": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case_ = False
snake_case_ = False
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = TFEsmModelTester(self )
lowercase__ = ConfigTester(self, config_class=_UpperCAmelCase, hidden_size=37 )
def snake_case__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = TFEsmModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@unittest.skip("Protein models do not support embedding resizing." )
def snake_case__ ( self ):
'''simple docstring'''
pass
@unittest.skip("Protein models do not support embedding resizing." )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(_UpperCAmelCase )
assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowercase__ = model.get_bias()
assert isinstance(_UpperCAmelCase, _UpperCAmelCase )
for k, v in name.items():
assert isinstance(_UpperCAmelCase, tf.Variable )
else:
lowercase__ = model.get_output_embeddings()
assert x is None
lowercase__ = model.get_bias()
assert name is None
@require_tf
class a__ ( unittest.TestCase ):
@slow
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
lowercase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase__ = model(_UpperCAmelCase )[0]
lowercase__ = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ), _UpperCAmelCase )
# compare the actual values for a slice.
lowercase__ = tf.constant(
[
[
[8.921_518, -10.589_814, -6.4_671_307],
[-6.3_967_156, -13.911_377, -1.1_211_915],
[-7.781_247, -13.951_557, -3.740_592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1E-2 ) )
@slow
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
lowercase__ = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowercase__ = model(_UpperCAmelCase )[0]
# compare the actual values for a slice.
lowercase__ = tf.constant(
[
[
[0.14_443_092, 0.54_125_327, 0.3_247_739],
[0.30_340_484, 0.00_526_676, 0.31_077_722],
[0.32_278_043, -0.24_987_096, 0.3_414_628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1E-4 ) )
| 668 |
"""simple docstring"""
from __future__ import annotations
import math
def __a ( A ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, and remaining multiples of 2 or 3 are not primes (2 and 3 were handled above)
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
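# Quick sanity checks of the 6k +/- 1 trial division (illustrative):
#   is_prime(29) -> True   (no divisor found up to sqrt(29))
#   is_prime(35) -> False  (35 = 5 * 7, caught at i = 5)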
lowerCAmelCase_: Optional[Any] = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]
def __a ( A ):
'''simple docstring'''
if not isinstance(A , A ):
raise ValueError("n must be an integer" )
if n <= 0:
raise ValueError("n must be >= 0" )
lowercase__ = []
for num in range(len(A ) ):
lowercase__ = 0
while 2 * i * i <= odd_composites[num]:
lowercase__ = odd_composites[num] - 2 * i * i
if is_prime(A ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(A ) == n:
return list_nums
return []
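# Note: the while/else above relies on Python's loop-else semantics: the else branch
# runs only when the while loop exhausts without hitting `break`, i.e. when no
# decomposition of the odd composite as prime + 2*i*i was found.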
def __a ( ):
'''simple docstring'''
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'{solution() = }')
| 668 | 1 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def __a ( A ):
'''simple docstring'''
if isinstance(A , collections.abc.Iterable ):
return x
return (x, x)
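# The helper above (used as `to_atuple` in the tests below) mirrors the common
# `to_2tuple` utility: iterables pass through unchanged, scalars become an (x, x) pair.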
@require_flax
class a__ :
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = np.abs((a - b) ).max()
self.assertLessEqual(_UpperCAmelCase, _UpperCAmelCase, F'''Difference between torch and flax is {diff} (>= {tol}).''' )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=None, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = VisionTextDualEncoderConfig.from_vision_text_configs(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = FlaxVisionTextDualEncoderModel(_UpperCAmelCase )
lowercase__ = model(input_ids=_UpperCAmelCase, pixel_values=_UpperCAmelCase, attention_mask=_UpperCAmelCase )
self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=None, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ , lowercase__ = self.get_vision_text_model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = {"vision_model": vision_model, "text_model": text_model}
lowercase__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_UpperCAmelCase )
lowercase__ = model(input_ids=_UpperCAmelCase, pixel_values=_UpperCAmelCase, attention_mask=_UpperCAmelCase )
self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=None, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ , lowercase__ = self.get_vision_text_model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = {"vision_model": vision_model, "text_model": text_model}
lowercase__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_UpperCAmelCase )
lowercase__ = model(input_ids=_UpperCAmelCase, pixel_values=_UpperCAmelCase, attention_mask=_UpperCAmelCase )
lowercase__ = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase )
lowercase__ = FlaxVisionTextDualEncoderModel.from_pretrained(_UpperCAmelCase )
lowercase__ = model(input_ids=_UpperCAmelCase, pixel_values=_UpperCAmelCase, attention_mask=_UpperCAmelCase )
lowercase__ = after_output[0]
lowercase__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_UpperCAmelCase, 1E-3 )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=None, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ , lowercase__ = self.get_vision_text_model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = {"vision_model": vision_model, "text_model": text_model}
lowercase__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_UpperCAmelCase )
lowercase__ = model(
input_ids=_UpperCAmelCase, pixel_values=_UpperCAmelCase, attention_mask=_UpperCAmelCase, output_attentions=_UpperCAmelCase )
lowercase__ = output.vision_model_output.attentions
self.assertEqual(len(_UpperCAmelCase ), vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase__ = to_atuple(vision_model.config.image_size )
lowercase__ = to_atuple(vision_model.config.patch_size )
lowercase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowercase__ = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len) )
lowercase__ = output.text_model_output.attentions
self.assertEqual(len(_UpperCAmelCase ), text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
pt_model.to(_UpperCAmelCase )
pt_model.eval()
# prepare inputs
lowercase__ = inputs_dict
lowercase__ = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
lowercase__ = pt_model(**_UpperCAmelCase ).to_tuple()
lowercase__ = fx_model(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ), len(_UpperCAmelCase ), "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4] ):
self.assert_almost_equals(_UpperCAmelCase, pt_output.numpy(), 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_UpperCAmelCase )
lowercase__ = FlaxVisionTextDualEncoderModel.from_pretrained(_UpperCAmelCase, from_pt=_UpperCAmelCase )
lowercase__ = fx_model_loaded(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ), len(_UpperCAmelCase ), "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4] ):
self.assert_almost_equals(_UpperCAmelCase, pt_output.numpy(), 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_UpperCAmelCase )
lowercase__ = VisionTextDualEncoderModel.from_pretrained(_UpperCAmelCase, from_flax=_UpperCAmelCase )
pt_model_loaded.to(_UpperCAmelCase )
pt_model_loaded.eval()
with torch.no_grad():
lowercase__ = pt_model_loaded(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ), len(_UpperCAmelCase ), "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4] ):
self.assert_almost_equals(_UpperCAmelCase, pt_output_loaded.numpy(), 4E-2 )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = VisionTextDualEncoderConfig.from_vision_text_configs(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = VisionTextDualEncoderModel(_UpperCAmelCase )
lowercase__ = FlaxVisionTextDualEncoderModel(_UpperCAmelCase )
lowercase__ = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), _UpperCAmelCase )
lowercase__ = fx_state
self.check_pt_flax_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = VisionTextDualEncoderConfig.from_vision_text_configs(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = VisionTextDualEncoderModel(_UpperCAmelCase )
lowercase__ = FlaxVisionTextDualEncoderModel(_UpperCAmelCase )
lowercase__ = load_flax_weights_in_pytorch_model(_UpperCAmelCase, fx_model.params )
self.check_pt_flax_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
self.check_save_load(**_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_UpperCAmelCase )
@is_pt_flax_cross_test
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
lowercase__ = config_inputs_dict.pop("vision_config" )
lowercase__ = config_inputs_dict.pop("text_config" )
lowercase__ = config_inputs_dict
self.check_equivalence_pt_to_flax(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
self.check_equivalence_flax_to_pt(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
@slow
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.get_pretrained_model_and_inputs()
lowercase__ = model_a(**_UpperCAmelCase )
lowercase__ = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_UpperCAmelCase )
lowercase__ = FlaxVisionTextDualEncoderModel.from_pretrained(_UpperCAmelCase )
lowercase__ = model_a(**_UpperCAmelCase )
lowercase__ = after_outputs[0]
lowercase__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_UpperCAmelCase, 1E-5 )
@require_flax
class a__ ( _a , unittest.TestCase ):
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-bert", vision_from_pt=_UpperCAmelCase, text_from_pt=_UpperCAmelCase, )
lowercase__ = 13
lowercase__ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowercase__ = ids_tensor([batch_size, 4], model.config.text_config.vocab_size )
lowercase__ = random_attention_mask([batch_size, 4] )
lowercase__ = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = FlaxViTModel(_UpperCAmelCase )
lowercase__ = FlaxBertModel(_UpperCAmelCase )
return vision_model, text_model
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = FlaxViTModelTester(self )
lowercase__ = FlaxBertModelTester(self )
lowercase__ = vit_model_tester.prepare_config_and_inputs()
lowercase__ = bert_model_tester.prepare_config_and_inputs()
lowercase__ , lowercase__ = vision_config_and_inputs
lowercase__ , lowercase__ , lowercase__ , lowercase__ = text_config_and_inputs
        # bundle the configs and inputs consumed by the shared dual-encoder test methods
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class a__ ( _a , unittest.TestCase ):
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip", "hf-internal-testing/tiny-bert", vision_from_pt=_UpperCAmelCase, text_from_pt=_UpperCAmelCase, )
lowercase__ = 13
lowercase__ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowercase__ = ids_tensor([batch_size, 4], model.config.text_config.vocab_size )
lowercase__ = random_attention_mask([batch_size, 4] )
lowercase__ = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = FlaxCLIPVisionModel(_UpperCAmelCase )
lowercase__ = FlaxBertModel(_UpperCAmelCase )
return vision_model, text_model
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = FlaxCLIPVisionModelTester(self )
lowercase__ = FlaxBertModelTester(self )
lowercase__ = clip_model_tester.prepare_config_and_inputs()
lowercase__ = bert_model_tester.prepare_config_and_inputs()
lowercase__ , lowercase__ = vision_config_and_inputs
lowercase__ , lowercase__ , lowercase__ , lowercase__ = text_config_and_inputs
        # bundle the configs and inputs consumed by the shared dual-encoder test methods
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class a__ ( unittest.TestCase ):
@slow
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0 )
lowercase__ = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
lowercase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
lowercase__ = processor(
text=["una foto di un gatto", "una foto di un cane"], images=_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors="np" )
lowercase__ = model(**_UpperCAmelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), )
lowercase__ = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image, _UpperCAmelCase, atol=1E-3 ) )
| 668 |
"""simple docstring"""
import os
import sys
lowerCAmelCase_: Any = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowerCAmelCase_: Union[str, Any] = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
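# Each thin wrapper below re-exports a `from_pretrained` entry point while borrowing
# the corresponding Auto class docstring via the @add_start_docstrings decorator.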
@add_start_docstrings(AutoConfig.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoConfig.from_pretrained(*A , **A )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(*A , **A )
@add_start_docstrings(AutoModel.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModel.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*A , **A )
| 668 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_: List[Any] = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
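# Torch-only symbols are registered lazily below so that importing this package does
# not fail in environments where PyTorch is unavailable.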
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: str = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowerCAmelCase_: Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 668 |
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class a__ ( unittest.TestCase ):
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(_UpperCAmelCase ):
lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(_UpperCAmelCase ):
lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase )
lowercase__ = FlaxBertModel.from_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX )
@jax.jit
def eval(**_UpperCAmelCase ):
return model(**_UpperCAmelCase )
eval(**_UpperCAmelCase ).block_until_ready()
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase )
lowercase__ = FlaxRobertaModel.from_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX )
@jax.jit
def eval(**_UpperCAmelCase ):
return model(**_UpperCAmelCase )
eval(**_UpperCAmelCase ).block_until_ready()
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase, "bert-base is not a local folder and is not a valid model identifier" ):
lowercase__ = FlaxAutoModel.from_pretrained("bert-base" )
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase, revision="aaaaaa" )
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase, "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack", ):
lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase, "Use `from_pt=True` to load this model" ):
lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
| 668 | 1 |
"""simple docstring"""
def __a ( A ):
'''simple docstring'''
lowercase__ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def __a ( A ):
'''simple docstring'''
lowercase__ = 0
while number > 0:
lowercase__ = number % 10
sum_of_digits += last_digit
lowercase__ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
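# Worked example: factorial(10) = 3628800 and split_and_add(3628800) = 3+6+2+8+8+0+0 = 27;
# solution(100) accordingly returns 648, the digit sum of 100!.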
def __a ( A = 1_00 ):
'''simple docstring'''
lowercase__ = factorial(A )
lowercase__ = split_and_add(A )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 668 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_: str = logging.get_logger(__name__)
lowerCAmelCase_: List[Any] = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class a__ ( _a ):
snake_case_ = "data2vec-vision"
def __init__( self, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.0, _UpperCAmelCase=0.0, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=224, _UpperCAmelCase=16, _UpperCAmelCase=3, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=True, _UpperCAmelCase=[3, 5, 7, 11], _UpperCAmelCase=[1, 2, 3, 6], _UpperCAmelCase=True, _UpperCAmelCase=0.4, _UpperCAmelCase=256, _UpperCAmelCase=1, _UpperCAmelCase=False, _UpperCAmelCase=255, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = use_mask_token
lowercase__ = use_absolute_position_embeddings
lowercase__ = use_relative_position_bias
lowercase__ = use_shared_relative_position_bias
lowercase__ = layer_scale_init_value
lowercase__ = drop_path_rate
lowercase__ = use_mean_pooling
# decode head attributes (semantic segmentation)
lowercase__ = out_indices
lowercase__ = pool_scales
# auxiliary head attributes (semantic segmentation)
lowercase__ = use_auxiliary_head
lowercase__ = auxiliary_loss_weight
lowercase__ = auxiliary_channels
lowercase__ = auxiliary_num_convs
lowercase__ = auxiliary_concat_input
lowercase__ = semantic_loss_ignore_index
class a__ ( _a ):
snake_case_ = version.parse("1.11" )
@property
def snake_case__ ( self ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
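    # The property below is the absolute tolerance applied when validating the
    # ONNX-exported model's outputs against the reference implementation.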
@property
def snake_case__ ( self ):
'''simple docstring'''
return 1E-4
| 668 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: Optional[int] = logging.get_logger(__name__)
lowerCAmelCase_: List[str] = {
"microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class a__ ( _a ):
snake_case_ = "wavlm"
def __init__( self, _UpperCAmelCase=32, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=0.0, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-5, _UpperCAmelCase="group", _UpperCAmelCase="gelu", _UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512), _UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2), _UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2), _UpperCAmelCase=False, _UpperCAmelCase=128, _UpperCAmelCase=16, _UpperCAmelCase=320, _UpperCAmelCase=800, _UpperCAmelCase=False, _UpperCAmelCase=True, _UpperCAmelCase=0.05, _UpperCAmelCase=10, _UpperCAmelCase=2, _UpperCAmelCase=0.0, _UpperCAmelCase=10, _UpperCAmelCase=320, _UpperCAmelCase=2, _UpperCAmelCase=0.1, _UpperCAmelCase=100, _UpperCAmelCase=256, _UpperCAmelCase=256, _UpperCAmelCase=0.1, _UpperCAmelCase="mean", _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=256, _UpperCAmelCase=(512, 512, 512, 512, 1500), _UpperCAmelCase=(5, 3, 3, 1, 1), _UpperCAmelCase=(1, 2, 3, 1, 1), _UpperCAmelCase=512, _UpperCAmelCase=80, _UpperCAmelCase=0, _UpperCAmelCase=1, _UpperCAmelCase=2, _UpperCAmelCase=False, _UpperCAmelCase=3, _UpperCAmelCase=2, _UpperCAmelCase=3, _UpperCAmelCase=None, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase, pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase )
lowercase__ = hidden_size
lowercase__ = feat_extract_norm
lowercase__ = feat_extract_activation
lowercase__ = list(_UpperCAmelCase )
lowercase__ = list(_UpperCAmelCase )
lowercase__ = list(_UpperCAmelCase )
lowercase__ = conv_bias
lowercase__ = num_buckets
lowercase__ = max_bucket_distance
lowercase__ = num_conv_pos_embeddings
lowercase__ = num_conv_pos_embedding_groups
lowercase__ = len(self.conv_dim )
lowercase__ = num_hidden_layers
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = num_attention_heads
lowercase__ = hidden_dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = feat_proj_dropout
lowercase__ = final_dropout
lowercase__ = layerdrop
lowercase__ = layer_norm_eps
lowercase__ = initializer_range
lowercase__ = num_ctc_classes
lowercase__ = vocab_size
lowercase__ = do_stable_layer_norm
lowercase__ = use_weighted_layer_sum
lowercase__ = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase__ = apply_spec_augment
lowercase__ = mask_time_prob
lowercase__ = mask_time_length
lowercase__ = mask_time_min_masks
lowercase__ = mask_feature_prob
lowercase__ = mask_feature_length
# parameters for pretraining with codevector quantized representations
lowercase__ = num_codevectors_per_group
lowercase__ = num_codevector_groups
lowercase__ = contrastive_logits_temperature
lowercase__ = num_negatives
lowercase__ = codevector_dim
lowercase__ = proj_codevector_dim
lowercase__ = diversity_loss_weight
# ctc loss
lowercase__ = ctc_loss_reduction
lowercase__ = ctc_zero_infinity
# adapter
lowercase__ = add_adapter
lowercase__ = adapter_kernel_size
lowercase__ = adapter_stride
lowercase__ = num_adapter_layers
lowercase__ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowercase__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowercase__ = list(_UpperCAmelCase )
lowercase__ = list(_UpperCAmelCase )
lowercase__ = list(_UpperCAmelCase )
lowercase__ = xvector_output_dim
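    # The property below gives the feature extractor's overall downsampling factor:
    # the product of all convolutional strides (5*2*2*2*2*2*2 = 320 samples per frame
    # with the default strides).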
@property
def snake_case__ ( self ):
'''simple docstring'''
return functools.reduce(operator.mul, self.conv_stride, 1 )
| 668 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: List[Any] = logging.get_logger(__name__)
lowerCAmelCase_: int = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class a__ ( _a ):
snake_case_ = "markuplm"
def __init__( self, _UpperCAmelCase=3_0522, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=2, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=0, _UpperCAmelCase=0, _UpperCAmelCase=2, _UpperCAmelCase=256, _UpperCAmelCase=1024, _UpperCAmelCase=216, _UpperCAmelCase=1001, _UpperCAmelCase=32, _UpperCAmelCase=50, _UpperCAmelCase="absolute", _UpperCAmelCase=True, _UpperCAmelCase=None, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = use_cache
lowercase__ = classifier_dropout
        # MarkupLM-specific properties (XPath tree embeddings)
lowercase__ = max_depth
lowercase__ = max_xpath_tag_unit_embeddings
lowercase__ = max_xpath_subs_unit_embeddings
lowercase__ = tag_pad_id
lowercase__ = subs_pad_id
lowercase__ = xpath_unit_hidden_size
| 668 | 1 |
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase=13, _UpperCAmelCase=32, _UpperCAmelCase=2, _UpperCAmelCase=3, _UpperCAmelCase=16, _UpperCAmelCase=[1, 2, 1], _UpperCAmelCase=[2, 2, 4], _UpperCAmelCase=2, _UpperCAmelCase=2.0, _UpperCAmelCase=True, _UpperCAmelCase=0.0, _UpperCAmelCase=0.0, _UpperCAmelCase=0.1, _UpperCAmelCase="gelu", _UpperCAmelCase=False, _UpperCAmelCase=True, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-5, _UpperCAmelCase=True, _UpperCAmelCase=None, _UpperCAmelCase=True, _UpperCAmelCase=10, _UpperCAmelCase=8, _UpperCAmelCase=["stage1", "stage2", "stage3"], _UpperCAmelCase=[1, 2, 3], ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = embed_dim
lowercase__ = depths
lowercase__ = num_heads
lowercase__ = window_size
lowercase__ = mlp_ratio
lowercase__ = qkv_bias
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = drop_path_rate
lowercase__ = hidden_act
lowercase__ = use_absolute_embeddings
lowercase__ = patch_norm
lowercase__ = layer_norm_eps
lowercase__ = initializer_range
lowercase__ = is_training
lowercase__ = scope
lowercase__ = use_labels
lowercase__ = type_sequence_label_size
lowercase__ = encoder_stride
lowercase__ = out_features
lowercase__ = out_indices
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self ):
'''simple docstring'''
return MaskFormerSwinConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = MaskFormerSwinModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
lowercase__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = MaskFormerSwinBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_UpperCAmelCase ):
lowercase__ = ["stem"]
lowercase__ = MaskFormerSwinBackbone(config=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class a__ ( _a , _a , unittest.TestCase ):
snake_case_ = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
snake_case_ = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = MaskFormerSwinModelTester(self )
lowercase__ = ConfigTester(self, config_class=_UpperCAmelCase, embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
) )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self ):
'''simple docstring'''
return
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCAmelCase )
@unittest.skip("Swin does not use inputs_embeds" )
def snake_case__ ( self ):
'''simple docstring'''
pass
@unittest.skip("Swin does not support feedforward chunking" )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase, nn.Linear ) )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(_UpperCAmelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1], _UpperCAmelCase )
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" )
def snake_case__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) )
lowercase__ = outputs.hidden_states
lowercase__ = getattr(
self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_UpperCAmelCase ), _UpperCAmelCase )
# Swin has a different seq_length
lowercase__ = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [num_patches, self.model_tester.embed_dim], )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase__ = True
self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = 3
lowercase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase__ = True
self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, (padded_height, padded_width) )
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
def snake_case__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def snake_case__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
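        # The helpers below compare tuple- and dict-style model outputs element-wise,
        # zeroing NaNs first so identical NaN positions do not register as a mismatch.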
def set_nan_tensor_to_zero(_UpperCAmelCase ):
lowercase__ = 0
return t
def check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase={} ):
with torch.no_grad():
lowercase__ = model(**_UpperCAmelCase, return_dict=_UpperCAmelCase, **_UpperCAmelCase )
lowercase__ = model(**_UpperCAmelCase, return_dict=_UpperCAmelCase, **_UpperCAmelCase ).to_tuple()
def recursive_check(_UpperCAmelCase, _UpperCAmelCase ):
if isinstance(_UpperCAmelCase, (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_UpperCAmelCase, _UpperCAmelCase ):
recursive_check(_UpperCAmelCase, _UpperCAmelCase )
elif isinstance(_UpperCAmelCase, _UpperCAmelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values(), dict_object.values() ):
recursive_check(_UpperCAmelCase, _UpperCAmelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_UpperCAmelCase ), set_nan_tensor_to_zero(_UpperCAmelCase ), atol=1E-5 ), msg=(
"Tuple and dict output are not equal. Difference:"
F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
F''' {torch.isnan(_UpperCAmelCase ).any()} and `inf`: {torch.isinf(_UpperCAmelCase )}. Dict has'''
F''' `nan`: {torch.isnan(_UpperCAmelCase ).any()} and `inf`: {torch.isinf(_UpperCAmelCase )}.'''
), )
recursive_check(_UpperCAmelCase, _UpperCAmelCase )
for model_class in self.all_model_classes:
lowercase__ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
lowercase__ = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
lowercase__ = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
lowercase__ = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, {"output_hidden_states": True} )
lowercase__ = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
lowercase__ = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, {"output_hidden_states": True} )
@require_torch
class a__ ( unittest.TestCase , _a ):
snake_case_ = (MaskFormerSwinBackbone,) if is_torch_available() else ()
snake_case_ = MaskFormerSwinConfig
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = MaskFormerSwinModelTester(self )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
lowercase__ = backbone_class(_UpperCAmelCase )
backbone.to(_UpperCAmelCase )
backbone.eval()
lowercase__ = backbone(**_UpperCAmelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps, _UpperCAmelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels ):
                self.assertEqual(feature_map.shape[:2], (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
lowercase__ = backbone(**_UpperCAmelCase, output_hidden_states=_UpperCAmelCase )
self.assertIsNotNone(outputs.hidden_states )
            self.assertEqual(len(outputs.hidden_states ), len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
lowercase__ , lowercase__ , lowercase__ = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels), (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
lowercase__ = backbone(**_UpperCAmelCase, output_attentions=_UpperCAmelCase )
self.assertIsNotNone(outputs.attentions )
| 668 |
"""simple docstring"""
lowerCAmelCase_: Union[str, Any] = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
lowerCAmelCase_: List[str] = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
lowerCAmelCase_: List[str] = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
lowerCAmelCase_: Dict = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
lowerCAmelCase_: Optional[int] = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
lowerCAmelCase_: Tuple = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
lowerCAmelCase_: str = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
lowerCAmelCase_: int = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
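# --- Hedged aside: the lists above are hand-written, strictly decreasing
# timestep schedules over [0, 999]. A comparable evenly spaced schedule can be
# generated like this; the exact spacing rule behind each list is not stated
# here, so this is illustrative only.
import numpy as np

def make_schedule(num_train_timesteps: int = 1000, num_inference_steps: int = 27) -> list[int]:
    steps = np.linspace(0, num_train_timesteps - 1, num_inference_steps).round()
    return [int(t) for t in steps[::-1]]  # descending, 999 ... 0

schedule = make_schedule()
assert schedule[0] == 999 and schedule[-1] == 0
assert all(a > b for a, b in zip(schedule, schedule[1:]))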
| 668 | 1 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase_: List[str] = 1_6
lowerCAmelCase_: Optional[Any] = 3_2
def __a ( A , A = 16 , A = "bert-base-cased" ):
'''simple docstring'''
lowercase__ = AutoTokenizer.from_pretrained(A )
lowercase__ = load_dataset("glue" , "mrpc" )
def tokenize_function(A ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ = datasets.map(
A , batched=A , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=A )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(A ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A , padding="max_length" , max_length=1_28 , return_tensors="pt" )
return tokenizer.pad(A , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
lowercase__ = DataLoader(
tokenized_datasets["train"] , shuffle=A , collate_fn=A , batch_size=A )
lowercase__ = DataLoader(
tokenized_datasets["validation"] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
def __a ( A , A ):
'''simple docstring'''
lowercase__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ = config["lr"]
lowercase__ = int(config["num_epochs"] )
lowercase__ = int(config["seed"] )
lowercase__ = int(config["batch_size"] )
lowercase__ = args.model_name_or_path
set_seed(A )
lowercase__ , lowercase__ = get_dataloaders(A , A , A )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ = AutoModelForSequenceClassification.from_pretrained(A , return_dict=A )
# Instantiate optimizer
lowercase__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase__ = optimizer_cls(params=model.parameters() , lr=A )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
lowercase__ = 1
lowercase__ = (len(A ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase__ = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=0 , num_training_steps=A , )
else:
lowercase__ = DummyScheduler(A , total_num_steps=A , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
A , A , A , A , A )
# We need to keep track of how many total steps we have iterated over
lowercase__ = 0
    # We also need to keep track of the starting epoch so files are named properly
lowercase__ = 0
# Now we train the model
lowercase__ = evaluate.load("glue" , "mrpc" )
lowercase__ = 0
lowercase__ = {}
for epoch in range(A , A ):
model.train()
for step, batch in enumerate(A ):
lowercase__ = model(**A )
lowercase__ = outputs.loss
lowercase__ = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
lowercase__ = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ = model(**A )
lowercase__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowercase__ , lowercase__ = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A ) - 1:
lowercase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowercase__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A , references=A , )
lowercase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , A )
lowercase__ = eval_metric["accuracy"]
if best_performance < eval_metric["accuracy"]:
lowercase__ = eval_metric["accuracy"]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f:
json.dump(A , A )
def __a ( ):
'''simple docstring'''
lowercase__ = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=A , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=A , )
parser.add_argument(
"--output_dir" , type=A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--performance_lower_bound" , type=A , default=A , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , )
parser.add_argument(
"--num_epochs" , type=A , default=3 , help="Number of train epochs." , )
lowercase__ = parser.parse_args()
lowercase__ = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(A , A )
if __name__ == "__main__":
main()
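# --- Hedged aside: the gradient-accumulation pattern from the training loop
# above, isolated with a toy model and random data. The loss is divided by the
# accumulation factor so the summed gradients match one large batch; the
# (step + 1) test steps the optimizer on every 4th micro-batch.
import torch
from torch import nn

model = nn.Linear(4, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
grad_accum_steps = 4

for step in range(16):
    x, y = torch.randn(8, 4), torch.randn(8, 1)
    loss = nn.functional.mse_loss(model(x), y)
    (loss / grad_accum_steps).backward()
    if (step + 1) % grad_accum_steps == 0:
        optimizer.step()
        optimizer.zero_grad()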
| 668 |
"""simple docstring"""
from __future__ import annotations
def __a ( A , A ):
'''simple docstring'''
if partitions <= 0:
raise ValueError("partitions must be a positive number!" )
if partitions > number_of_bytes:
raise ValueError("partitions can not > number_of_bytes!" )
lowercase__ = number_of_bytes // partitions
lowercase__ = []
for i in range(A ):
lowercase__ = i * bytes_per_partition + 1
lowercase__ = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
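# --- Hedged aside: a de-obfuscated restatement of the partitioner above (the
# masked assignments make the original hard to follow), plus two worked checks.
# The name `allocation_num` is my assumption, not given by the source.
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        # The last partition absorbs any remainder from the integer division.
        end_bytes = number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list

assert allocation_num(100, 4) == ["1-25", "26-50", "51-75", "76-100"]
assert allocation_num(10, 3) == ["1-3", "4-6", "7-10"]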
| 668 | 1 |
"""simple docstring"""
def __a ( A , A ):
'''simple docstring'''
lowercase__ = len(A )
lowercase__ = len(A )
lowercase__ = (
first_str_length if first_str_length > second_str_length else second_str_length
)
lowercase__ = []
for char_count in range(A ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(A )
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
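# --- Hedged aside: the same interleaving can be written with
# itertools.zip_longest, which makes the "append the longer tail" behaviour
# explicit; checked against the example printed above.
from itertools import zip_longest

def alternative_string_arrange(first_str: str, second_str: str) -> str:
    return "".join(a + b for a, b in zip_longest(first_str, second_str, fillvalue=""))

assert alternative_string_arrange("AB", "XYZ") == "AXBYZ"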
| 668 |
"""simple docstring"""
from collections import deque
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = process_name # process name
lowercase__ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
lowercase__ = arrival_time
lowercase__ = burst_time # remaining burst time
lowercase__ = 0 # total time of the process wait in ready queue
lowercase__ = 0 # time from arrival time to completion time
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = number_of_queues
# time slice of queues that round robin algorithm applied
lowercase__ = time_slices
# unfinished process is in this ready_queue
lowercase__ = queue
# current time
lowercase__ = current_time
# finished process is in this sequence queue
lowercase__ = deque()
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
return [q.burst_time for q in queue]
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = deque() # sequence deque of finished process
while len(_UpperCAmelCase ) != 0:
lowercase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_UpperCAmelCase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
lowercase__ = 0
# set the process's turnaround time because it is finished
lowercase__ = self.current_time - cp.arrival_time
# set the completion time
lowercase__ = self.current_time
# add the process to queue that has finished queue
finished.append(_UpperCAmelCase )
self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(_UpperCAmelCase ) ):
lowercase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_UpperCAmelCase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
lowercase__ = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(_UpperCAmelCase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
lowercase__ = 0
# set the finish time
lowercase__ = self.current_time
# update the process' turnaround time because it is finished
lowercase__ = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_UpperCAmelCase )
self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def snake_case__ ( self ):
'''simple docstring'''
for i in range(self.number_of_queues - 1 ):
lowercase__ , lowercase__ = self.round_robin(
self.ready_queue, self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
lowerCAmelCase_: Optional[int] = Process("P1", 0, 5_3)
lowerCAmelCase_: Union[str, Any] = Process("P2", 0, 1_7)
lowerCAmelCase_: str = Process("P3", 0, 6_8)
lowerCAmelCase_: int = Process("P4", 0, 2_4)
lowerCAmelCase_: Dict = 3
lowerCAmelCase_: Any = [1_7, 2_5]
lowerCAmelCase_: Tuple = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
lowerCAmelCase_: Any = Process("P1", 0, 5_3)
lowerCAmelCase_: Tuple = Process("P2", 0, 1_7)
lowerCAmelCase_: Optional[int] = Process("P3", 0, 6_8)
lowerCAmelCase_: List[Any] = Process("P4", 0, 2_4)
lowerCAmelCase_: Union[str, Any] = 3
lowerCAmelCase_: Any = [1_7, 2_5]
lowerCAmelCase_: Optional[Any] = deque([Pa, Pa, Pa, Pa])
lowerCAmelCase_: Union[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0)
lowerCAmelCase_: Tuple = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
    print(F'waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}')
    # print completion times of processes(P1, P2, P3, P4)
    print(F'completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}')
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(F'turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}')
    # print sequence of finished processes
    print(F'sequence of finished processes:{mlfq.calculate_sequence_of_finish_queue()}')
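# --- Hedged aside: the round-robin core of the MLFQ above, reduced to plain
# (name, remaining_burst) pairs so the time-slice mechanics are visible in
# isolation; finish order only, no waiting/turnaround bookkeeping.
from collections import deque

def round_robin_order(jobs: list[tuple[str, int]], time_slice: int) -> list[str]:
    queue = deque(jobs)
    finished = []
    while queue:
        name, burst = queue.popleft()
        if burst > time_slice:
            queue.append((name, burst - time_slice))  # not done: back of the queue
        else:
            finished.append(name)
    return finished

# P2 (17) fits in one slice; the longer jobs keep cycling back.
assert round_robin_order([("P1", 53), ("P2", 17), ("P3", 68), ("P4", 24)], 17) == ["P2", "P4", "P1", "P3"]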
| 668 | 1 |
"""simple docstring"""
lowerCAmelCase_: List[str] = {str(digit): digit**5 for digit in range(1_0)}
def __a ( A ):
'''simple docstring'''
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(A ) )
def __a ( ):
'''simple docstring'''
return sum(
number
for number in range(10_00 , 1_00_00_00 )
if number == digits_fifth_powers_sum(A ) )
if __name__ == "__main__":
print(solution())
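# --- Hedged aside: one hand check of the property being summed. 4150 is such a
# number, since 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.
assert sum(int(d) ** 5 for d in "4150") == 4150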
| 668 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowerCAmelCase_: Dict = "pt"
elif is_tf_available():
lowerCAmelCase_: Dict = "tf"
else:
lowerCAmelCase_: str = "jax"
class a__ ( _a , unittest.TestCase ):
snake_case_ = ByTaTokenizer
snake_case_ = False
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
lowercase__ = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def snake_case__ ( self ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained("google/byt5-small" )
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase=False, _UpperCAmelCase=20, _UpperCAmelCase=5 ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
try:
lowercase__ = tokenizer.decode([i], clean_up_tokenization_spaces=_UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowercase__ = list(filter(lambda _UpperCAmelCase : re.match(R"^[ a-zA-Z]+$", t[1] ), _UpperCAmelCase ) )
lowercase__ = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1], add_special_tokens=_UpperCAmelCase ), _UpperCAmelCase ) )
if max_length is not None and len(_UpperCAmelCase ) > max_length:
lowercase__ = toks[:max_length]
if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0:
while len(_UpperCAmelCase ) < min_length:
lowercase__ = toks + toks
# toks_str = [t[1] for t in toks]
lowercase__ = [t[0] for t in toks]
# Ensure consistency
lowercase__ = tokenizer.decode(_UpperCAmelCase, clean_up_tokenization_spaces=_UpperCAmelCase )
if " " not in output_txt and len(_UpperCAmelCase ) > 1:
lowercase__ = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=_UpperCAmelCase )
+ " "
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=_UpperCAmelCase )
)
if with_prefix_space:
lowercase__ = " " + output_txt
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
return output_txt, output_ids
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"] )
lowercase__ = tokenizer(["hi", "I went to the gym", ""] )
self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = "Unicode €."
lowercase__ = tokenizer(_UpperCAmelCase )
lowercase__ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["input_ids"], _UpperCAmelCase )
# decoding
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, "Unicode €.</s>" )
lowercase__ = tokenizer("e è é ê ë" )
lowercase__ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["input_ids"], _UpperCAmelCase )
# decoding
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, "e è é ê ë</s>" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ), "e è é ê ë</s>" )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
if FRAMEWORK != "jax":
lowercase__ = list(batch.input_ids.numpy()[0] )
else:
lowercase__ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertEqual((2, 37), batch.input_ids.shape )
self.assertEqual((2, 37), batch.attention_mask.shape )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids", _UpperCAmelCase )
self.assertIn("attention_mask", _UpperCAmelCase )
self.assertNotIn("decoder_input_ids", _UpperCAmelCase )
self.assertNotIn("decoder_attention_mask", _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = [
"Summary of the text.",
"Another summary.",
]
lowercase__ = tokenizer(
text_target=_UpperCAmelCase, max_length=32, padding="max_length", truncation=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
self.assertEqual(32, targets["input_ids"].shape[1] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization. </s>"]
lowercase__ = ["Summary of the text. </s>"]
# fmt: off
lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
lowercase__ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
lowercase__ = tokenizer(_UpperCAmelCase, text_target=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, batch["input_ids"][0] )
self.assertEqual(_UpperCAmelCase, batch["labels"][0] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
lowercase__ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
lowercase__ = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(_UpperCAmelCase )
lowercase__ = [F'''<extra_id_{i}>''' for i in range(125 )]
lowercase__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
lowercase__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowercase__ = tokenizer_class.from_pretrained(
_UpperCAmelCase, )
self.assertIn(
"an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowercase__ = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=_UpperCAmelCase )]
lowercase__ = tokenizer_class.from_pretrained(
_UpperCAmelCase, additional_special_tokens=_UpperCAmelCase, )
self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ), )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer_class.from_pretrained(_UpperCAmelCase )
self.assertTrue(tokenizer.decode([255] ) == "" )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(fast=_UpperCAmelCase, do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
lowercase__ = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
lowercase__ = 0
lowercase__ = tokenizer.convert_ids_to_tokens(
_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase )
for attr in attributes_list:
setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase )
setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase )
setattr(_UpperCAmelCase, "additional_special_tokens_ids", [] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [] )
setattr(_UpperCAmelCase, "additional_special_tokens_ids", [token_id_to_test_setters] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [token_to_test_setters] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [token_id_to_test_setters] )
| 668 | 1 |
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def __a ( A = "https://www.worldometers.info/coronavirus" ):
'''simple docstring'''
lowercase__ = BeautifulSoup(requests.get(A ).text , "html.parser" )
lowercase__ = soup.findAll("h1" )
lowercase__ = soup.findAll("div" , {"class": "maincounter-number"} )
keys += soup.findAll("span" , {"class": "panel-title"} )
values += soup.findAll("div" , {"class": "number-table-main"} )
return {key.text.strip(): value.text.strip() for key, value in zip(A , A )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(F'{key}\n{value}\n')
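# --- Hedged aside: the scrape-and-zip pattern above, generalized. The CSS
# selectors here are placeholders; the real class names are the ones used in
# the function above.
import requests
from bs4 import BeautifulSoup

def scrape_pairs(url: str, key_selector: str, value_selector: str) -> dict[str, str]:
    soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser")
    keys = [k.text.strip() for k in soup.select(key_selector)]
    values = [v.text.strip() for v in soup.select(value_selector)]
    return dict(zip(keys, values))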
| 668 |
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class a__ ( unittest.TestCase ):
snake_case_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = hf_hub_download(
repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" )
lowercase__ = VideoClassificationPipeline(model=_UpperCAmelCase, image_processor=_UpperCAmelCase, top_k=2 )
lowercase__ = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
for example in examples:
lowercase__ = video_classifier(_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase, [
{"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )},
{"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )},
], )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
lowercase__ = VideoMAEFeatureExtractor(
size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10} )
lowercase__ = pipeline(
"video-classification", model=_UpperCAmelCase, feature_extractor=_UpperCAmelCase, frame_sampling_rate=4 )
lowercase__ = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" )
lowercase__ = video_classifier(_UpperCAmelCase, top_k=2 )
self.assertEqual(
nested_simplify(_UpperCAmelCase, decimals=4 ), [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], )
lowercase__ = video_classifier(
[
video_file_path,
video_file_path,
], top_k=2, )
self.assertEqual(
nested_simplify(_UpperCAmelCase, decimals=4 ), [
[{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
[{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
], )
@require_tf
def snake_case__ ( self ):
'''simple docstring'''
pass
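# --- Hedged aside: outside the test harness, invoking such a pipeline might
# look like this. The model id is the tiny test checkpoint used above; the
# video path is a placeholder you would supply yourself.
from transformers import pipeline

classifier = pipeline(
    "video-classification", model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
)
predictions = classifier("archery.mp4", top_k=2)  # [{"score": ..., "label": ...}, ...]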
| 668 | 1 |
"""simple docstring"""
def __a ( A ):
'''simple docstring'''
return "".join(chr(ord(A ) - 32 ) if "a" <= char <= "z" else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
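# --- Hedged aside: the trick relies on ASCII uppercase letters sitting exactly
# 32 code points below lowercase ones ("a" is 97, "A" is 65). A de-obfuscated
# restatement with a worked check:
def to_upper(word: str) -> str:
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)

assert to_upper("wow hello") == "WOW HELLO"
assert ord("a") - 32 == ord("A")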
| 668 |
"""simple docstring"""
import itertools
import math
def __a ( A ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __a ( ):
'''simple docstring'''
lowercase__ = 2
while True:
if is_prime(A ):
yield num
num += 1
def __a ( A = 1_00_01 ):
'''simple docstring'''
return next(itertools.islice(prime_generator() , nth - 1 , A ) )
if __name__ == "__main__":
print(F'{solution() = }')
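# --- Hedged aside: a spot check of the 6k +/- 1 trial division above against a
# brute-force reference for small n (every prime > 3 has the form 6k +/- 1,
# which is why stepping i by 6 and testing i and i + 2 suffices).
def is_prime_naive(n: int) -> bool:
    return n > 1 and all(n % d for d in range(2, n))

def is_prime_6k(n: int) -> bool:
    if 1 < n < 4:
        return True
    if n < 2 or n % 2 == 0 or n % 3 == 0:
        return False
    i = 5
    while i * i <= n:
        if n % i == 0 or n % (i + 2) == 0:
            return False
        i += 6
    return True

assert all(is_prime_6k(n) == is_prime_naive(n) for n in range(500))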
| 668 | 1 |
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase=13, _UpperCAmelCase=7, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase=False, _UpperCAmelCase=True, _UpperCAmelCase=99, _UpperCAmelCase=64, _UpperCAmelCase=5, _UpperCAmelCase=4, _UpperCAmelCase=64, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=16, _UpperCAmelCase=2, _UpperCAmelCase=0.02, _UpperCAmelCase=3, _UpperCAmelCase=4, _UpperCAmelCase=None, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
def snake_case__ ( self ):
'''simple docstring'''
return MPNetConfig.from_pretrained("microsoft/mpnet-base" )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowercase__ = ids_tensor([self.batch_size], self.num_choices )
lowercase__ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self ):
'''simple docstring'''
return MPNetConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = MPNetModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = MPNetForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(
_UpperCAmelCase, attention_mask=_UpperCAmelCase, start_positions=_UpperCAmelCase, end_positions=_UpperCAmelCase, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = MPNetForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.num_choices
lowercase__ = MPNetForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowercase__ = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowercase__ = model(
_UpperCAmelCase, attention_mask=_UpperCAmelCase, labels=_UpperCAmelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = MPNetForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) = config_and_inputs
lowercase__ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class a__ ( _a , _a , unittest.TestCase ):
snake_case_ = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
snake_case_ = (
{
"feature-extraction": MPNetModel,
"fill-mask": MPNetForMaskedLM,
"question-answering": MPNetForQuestionAnswering,
"text-classification": MPNetForSequenceClassification,
"token-classification": MPNetForTokenClassification,
"zero-shot": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = True
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = MPNetModelTester(self )
lowercase__ = ConfigTester(self, config_class=_UpperCAmelCase, hidden_size=37 )
def snake_case__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*_UpperCAmelCase )
@require_torch
class a__ ( unittest.TestCase ):
@slow
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = MPNetModel.from_pretrained("microsoft/mpnet-base" )
lowercase__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowercase__ = model(_UpperCAmelCase )[0]
lowercase__ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape, _UpperCAmelCase )
lowercase__ = torch.tensor(
[[[-0.0_550, 0.1_943, -0.0_740], [-0.0_562, 0.2_211, -0.0_579], [-0.0_437, 0.3_337, -0.0_641]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3], _UpperCAmelCase, atol=1E-4 ) )
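# --- Hedged aside: the multiple-choice test's input expansion (unsqueeze, then
# expand across choices) in isolation, with assumed toy shapes.
import torch

batch_size, num_choices, seq_len = 2, 4, 7
input_ids = torch.randint(0, 99, (batch_size, seq_len))
expanded = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
assert expanded.shape == (batch_size, num_choices, seq_len)
assert torch.equal(expanded[:, 0], input_ids)  # every choice sees the same tokens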
| 668 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class a__ ( _a ):
def __init__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, _UpperCAmelCase = None, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
_UpperCAmelCase, split=_UpperCAmelCase, features=_UpperCAmelCase, cache_dir=_UpperCAmelCase, keep_in_memory=_UpperCAmelCase, streaming=_UpperCAmelCase, num_proc=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = path_or_paths if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else {self.split: path_or_paths}
lowercase__ = Text(
cache_dir=_UpperCAmelCase, data_files=_UpperCAmelCase, features=_UpperCAmelCase, **_UpperCAmelCase, )
def snake_case__ ( self ):
'''simple docstring'''
if self.streaming:
lowercase__ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = None
self.builder.download_and_prepare(
download_config=_UpperCAmelCase, download_mode=_UpperCAmelCase, verification_mode=_UpperCAmelCase, base_path=_UpperCAmelCase, num_proc=self.num_proc, )
lowercase__ = self.builder.as_dataset(
split=self.split, verification_mode=_UpperCAmelCase, in_memory=self.keep_in_memory )
return dataset
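# --- Hedged aside: the end-user entry point for this reader is the packaged
# "text" builder; a usage sketch (the file path is a placeholder).
from datasets import load_dataset

ds = load_dataset("text", data_files={"train": "my_corpus.txt"}, split="train")
# Each line of my_corpus.txt becomes one example: {"text": "..."}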
| 668 | 1 |
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __a ( ):
'''simple docstring'''
lowercase__ = ArgumentParser("Transformers CLI tool" , usage="transformers-cli <command> [<args>]" )
lowercase__ = parser.add_subparsers(help="transformers-cli command helpers" )
# Register commands
ConvertCommand.register_subcommand(A )
DownloadCommand.register_subcommand(A )
EnvironmentCommand.register_subcommand(A )
RunCommand.register_subcommand(A )
ServeCommand.register_subcommand(A )
UserCommands.register_subcommand(A )
AddNewModelCommand.register_subcommand(A )
AddNewModelLikeCommand.register_subcommand(A )
LfsCommands.register_subcommand(A )
PTtoTFCommand.register_subcommand(A )
# Let's go
lowercase__ = parser.parse_args()
if not hasattr(A , "func" ):
parser.print_help()
exit(1 )
# Run
lowercase__ = args.func(A )
service.run()
if __name__ == "__main__":
main()
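# --- Hedged aside: the registration pattern above boils down to argparse
# sub-parsers that stash a callable on `args.func`; a minimal standalone sketch.
from argparse import ArgumentParser

def cli_main() -> None:
    parser = ArgumentParser("mycli", usage="mycli <command> [<args>]")
    commands = parser.add_subparsers(help="command helpers")

    greet = commands.add_parser("greet")
    greet.add_argument("--name", default="world")
    greet.set_defaults(func=lambda args: print(f"hello {args.name}"))

    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        raise SystemExit(1)
    args.func(args)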
| 668 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase_: List[str] = 1_6
lowerCAmelCase_: Optional[Any] = 3_2
def __a ( A , A = 16 , A = "bert-base-cased" ):
'''simple docstring'''
lowercase__ = AutoTokenizer.from_pretrained(A )
lowercase__ = load_dataset("glue" , "mrpc" )
def tokenize_function(A ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ = datasets.map(
A , batched=A , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=A )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(A ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A , padding="max_length" , max_length=1_28 , return_tensors="pt" )
return tokenizer.pad(A , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
lowercase__ = DataLoader(
tokenized_datasets["train"] , shuffle=A , collate_fn=A , batch_size=A )
lowercase__ = DataLoader(
tokenized_datasets["validation"] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
def __a ( A , A ):
'''simple docstring'''
lowercase__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ = config["lr"]
lowercase__ = int(config["num_epochs"] )
lowercase__ = int(config["seed"] )
lowercase__ = int(config["batch_size"] )
lowercase__ = args.model_name_or_path
set_seed(A )
lowercase__ , lowercase__ = get_dataloaders(A , A , A )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ = AutoModelForSequenceClassification.from_pretrained(A , return_dict=A )
# Instantiate optimizer
lowercase__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase__ = optimizer_cls(params=model.parameters() , lr=A )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
lowercase__ = 1
lowercase__ = (len(A ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase__ = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=0 , num_training_steps=A , )
else:
lowercase__ = DummyScheduler(A , total_num_steps=A , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
A , A , A , A , A )
# We need to keep track of how many total steps we have iterated over
lowercase__ = 0
# We also need to keep track of the stating epoch so files are named properly
lowercase__ = 0
# Now we train the model
lowercase__ = evaluate.load("glue" , "mrpc" )
lowercase__ = 0
lowercase__ = {}
for epoch in range(A , A ):
model.train()
for step, batch in enumerate(A ):
lowercase__ = model(**A )
lowercase__ = outputs.loss
lowercase__ = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
lowercase__ = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ = model(**A )
lowercase__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowercase__ , lowercase__ = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A ) - 1:
lowercase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowercase__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A , references=A , )
lowercase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , A )
lowercase__ = eval_metric["accuracy"]
if best_performance < eval_metric["accuracy"]:
lowercase__ = eval_metric["accuracy"]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f:
json.dump(A , A )
def __a ( ):
'''simple docstring'''
lowercase__ = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=A , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=A , )
parser.add_argument(
"--output_dir" , type=A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--performance_lower_bound" , type=A , default=A , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , )
parser.add_argument(
"--num_epochs" , type=A , default=3 , help="Number of train epochs." , )
lowercase__ = parser.parse_args()
lowercase__ = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(A , A )
if __name__ == "__main__":
main()
| 668 | 1 |