# MobileNetV1 model configuration (Hugging Face transformers).
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
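# Usage sketch (not part of the original module):
#
# from transformers import MobileNetV1Config, MobileNetV1Model
#
# config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
# model = MobileNetV1Model(config)  # randomly initialized with this configuration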
# Recursive linear search that scans the list from both ends simultaneously.
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """
    Iterate through the list from both ends to find the index of key.

    :param list_data: the list to be searched
    :param key: the key to be searched
    :param left: the index of the first element
    :param right: the index of the last element
    :return: the index of key if found, -1 otherwise

    >>> search(list(range(0, 11)), 5)
    5
    >>> search([1, 2, 4, 5, 3], 4)
    2
    >>> search([1, 2, 4, 5, 3], 6)
    -1
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
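# Quick check (not part of the original file): unlike binary search, this
# double-ended scan does not require sorted input; it compares the two ends
# and recurses one step inward, so it is O(n) on any list.
if __name__ == "__main__":
    assert search([9, 2, 7, 4, 1], 7) == 2
    assert search([9, 2, 7, 4, 1], 8) == -1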
# Text-to-image generation with an (optionally INT8-quantized) Stable Diffusion
# UNet, using Intel Neural Compressor to load the quantized weights.
import argparse
import math
import os

import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How many images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")

pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# disable the safety checker so it does not filter the generated images
pipeline.safety_checker = lambda images, clip_input: (images, False)

if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    # load the INT8 UNet produced by Neural Compressor
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
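# Usage sketch (not part of the original script); the flag names match the
# argparse definitions above, the script name and model path are illustrative:
#
#   python text2images.py \
#       --pretrained_model_name_or_path runwayml/stable-diffusion-v1-5 \
#       --caption "robotic cat with wings" \
#       --images_num 4 --seed 42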
# Tests for the datasets FileLock utility.
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01

    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    # file names longer than the filesystem limit must be hashed down to a valid lock file
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
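# Usage sketch (not part of the original tests): the FileLock pattern these
# tests exercise; `Timeout` is raised when the lock cannot be acquired in time.
#
# from datasets.utils.filelock import FileLock, Timeout
#
# lock = FileLock("/tmp/demo.lock")  # illustrative path
# try:
#     with lock.acquire(timeout=1):
#         ...  # critical section, guarded across processes by the lock file
# except Timeout:
#     print("another process holds the lock")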
# Three implementations of digit-sum, with a small benchmark harness.
def sum_of_digits(n: int) -> int:
    """
    Find the sum of digits of a number.

    >>> sum_of_digits(12345)
    15
    >>> sum_of_digits(-123)
    6
    >>> sum_of_digits(0)
    0
    """
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """
    Find the sum of digits of a number using recursion.
    """
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """
    Find the sum of digits of a number via its string representation.
    """
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """
    Benchmark all three functions, with three different-length int values.
    """
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):  # 2**18, 2**50, 2**100
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
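# Quick check (not part of the original file): all three implementations agree.
if __name__ == "__main__":
    for n in (0, 7, 12345, -9876):
        assert sum_of_digits(n) == sum_of_digits_recursion(n) == sum_of_digits_compact(n)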
# VAE building blocks: Encoder, Decoder, VectorQuantizer and
# DiagonalGaussianDistribution (diffusers, models/vae.py).
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    """
    Output of the decoding method.
    """

    sample: torch.FloatTensor


class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(
            in_channels,
            block_out_channels[0],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)

        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels,
            block_out_channels[-1],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class VectorQuantizer(nn.Module):
    """
    Discretization bottleneck of the VQ-VAE. Avoids costly matrix multiplications
    and allows for post-hoc remapping of indices.
    """

    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j: (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q


class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has the same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
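# Shape sanity check (not part of the original module): with the defaults, an
# Encoder maps a 3x32x32 image to a 6x32x32 moments tensor (2 * out_channels
# because double_z=True, and the single final block does not downsample).
if __name__ == "__main__":
    enc = Encoder()
    dec = Decoder()
    x = torch.randn(1, 3, 32, 32)
    moments = enc(x)                        # -> (1, 6, 32, 32)
    posterior = DiagonalGaussianDistribution(moments)
    z = posterior.sample()                  # -> (1, 3, 32, 32)
    recon = dec(z)                          # -> (1, 3, 32, 32)
    print(moments.shape, z.shape, recon.shape)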
# Convert BigBird-Pegasus checkpoints from TensorFlow to Hugging Face transformers.
import argparse
from typing import Dict

import tensorflow as tf
import torch
from tqdm import tqdm

from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration


INIT_COMMON = [
    # tf -> hf
    ("/", "."),
    ("layer_", "layers."),
    ("kernel", "weight"),
    ("beta", "bias"),
    ("gamma", "weight"),
    ("pegasus", "model"),
]
END_COMMON = [
    (".output.dense", ".fc2"),
    ("intermediate.LayerNorm", "final_layer_norm"),
    ("intermediate.dense", "fc1"),
]

DECODER_PATTERNS = (
    INIT_COMMON
    + [
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.out_proj"),
        ("attention.self", "self_attn"),
        ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
        ("attention.encdec_output.dense", "encoder_attn.out_proj"),
        ("attention.encdec", "encoder_attn"),
        ("key", "k_proj"),
        ("value", "v_proj"),
        ("query", "q_proj"),
        ("decoder.LayerNorm", "decoder.layernorm_embedding"),
    ]
    + END_COMMON
)

REMAINING_PATTERNS = (
    INIT_COMMON
    + [
        ("embeddings.word_embeddings", "shared.weight"),
        ("embeddings.position_embeddings", "embed_positions.weight"),
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.output"),
        ("attention.self", "self_attn.self"),
        ("encoder.LayerNorm", "encoder.layernorm_embedding"),
    ]
    + END_COMMON
)

KEYS_TO_IGNORE = [
    "encdec/key/bias",
    "encdec/query/bias",
    "encdec/value/bias",
    "self/key/bias",
    "self/query/bias",
    "self/value/bias",
    "encdec_output/dense/bias",
    "attention/output/dense/bias",
]


def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k


def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(i in k for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(i in k for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
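# Usage sketch (not part of the original script); paths are illustrative:
#
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path /path/to/bigbird_pegasus_ckpt \
#       --save_dir ./bigbird-pegasus-converted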
# Convert mLUKE checkpoints from the original repository to Hugging Face transformers.
import argparse
import json
import os
from collections import OrderedDict

import torch

from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor(
            [[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]]
        )

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
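# Usage sketch (not part of the original script); paths are illustrative:
#
#   python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/mluke/pytorch_model.bin \
#       --metadata_path /path/to/mluke/metadata.json \
#       --entity_vocab_path /path/to/mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base-converted \
#       --model_size base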
# Project Euler problem 173: count square "laminae" (hollow square frames)
# that can be built from up to `limit` unit tiles.
from math import ceil, sqrt


def solution(limit: int = 1000000) -> int:
    """
    Return the number of square laminae that can be formed using up to
    `limit` tiles. A lamina with outer side o and hole side i uses
    o**2 - i**2 tiles, where o and i have the same parity and i >= 1.

    >>> solution(100)
    41
    """
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole width must have the same parity as the outer width
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
# CLAP processor: wraps a CLAP feature extractor and a RoBERTa tokenizer
# (Hugging Face transformers).
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
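# Usage sketch (not part of the original module); the model id is illustrative:
#
# import numpy as np
# from transformers import ClapProcessor
#
# processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
# inputs = processor(
#     text=["a dog barking"],
#     audios=[np.zeros(48000, dtype=np.float32)],
#     sampling_rate=48000,
#     return_tensors="pt",
# )
# # -> tokenizer outputs plus "input_features" from the feature extractor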
# The Image feature of the Hugging Face datasets library: encodes examples to
# {"bytes", "path"} dicts backed by Arrow, and decodes them back to PIL images.
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import numpy as np
import pyarrow as pa

from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict


if TYPE_CHECKING:
    import PIL.Image

    from .features import FeatureType


_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
    np.dtype("|b1"),
    np.dtype("|u1"),
    np.dtype("<u2"),
    np.dtype(">u2"),
    np.dtype("<i2"),
    np.dtype(">i2"),
    np.dtype("<u4"),
    np.dtype(">u4"),
    np.dtype("<i4"),
    np.dtype(">i4"),
    np.dtype("<f4"),
    np.dtype(">f4"),
    np.dtype("<f8"),
    np.dtype(">f8"),
]


@dataclass
class Image:
    """
    Image feature to read image data from an image file.
    """

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)


def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, else use PNG/TIFF compression."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            if np.dtype(dest_dtype_str) in _VALID_IMAGE_ARRAY_DTPYES:
                dest_dtype = np.dtype(dest_dtype_str)
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(objs) -> list:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
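# Usage sketch (not part of the original module): the Image feature encodes
# examples to {"bytes", "path"} dicts and decodes them back to PIL images.
#
# from datasets import Dataset, Features, Image
#
# ds = Dataset.from_dict(
#     {"image": ["path/to/cat.png"]},  # illustrative path
#     features=Features({"image": Image()}),
# )
# pil_image = ds[0]["image"]  # decoded to a PIL.Image.Image on access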
# Tests for the digital_image_processing package (TheAlgorithms/Python).
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
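# Run note (not part of the original file): these are plain pytest tests, e.g.
#
#   pytest digital_image_processing/test_digital_image_processing.py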
# Fast ELECTRA tokenizer (Hugging Face transformers).
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
        ),
        "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-generator": (
            "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/electra-small-generator": 512,
    "google/electra-base-generator": 512,
    "google/electra-large-generator": 512,
    "google/electra-small-discriminator": 512,
    "google/electra-base-discriminator": 512,
    "google/electra-large-discriminator": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "google/electra-small-generator": {"do_lower_case": True},
    "google/electra-base-generator": {"do_lower_case": True},
    "google/electra-large-generator": {"do_lower_case": True},
    "google/electra-small-discriminator": {"do_lower_case": True},
    "google/electra-base-discriminator": {"do_lower_case": True},
    "google/electra-large-discriminator": {"do_lower_case": True},
}


class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
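# Usage sketch (not part of the original module); the checkpoint name comes
# from the pretrained maps above:
#
# from transformers import ElectraTokenizerFast
#
# tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
# enc = tok("ELECTRA pre-training", "is sample-efficient")
# # enc["token_type_ids"] marks the first segment with 0s and the second with 1s,
# # as produced by create_token_type_ids_from_sequences above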
# Manhattan (taxicab) distance between two points in n-dimensional space.
def manhattan_distance(point_a: list, point_b: list) -> float:
    """
    Expects two lists of numbers representing two points in the same
    n-dimensional space.

    >>> manhattan_distance([1, 1], [2, 2])
    2.0
    >>> manhattan_distance([1.5, 1.5], [2, 2])
    1.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """
    >>> manhattan_distance_one_liner([1, 1], [2, 2])
    2.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
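# Quick check (not part of the original file): validation rejects non-numeric input.
if __name__ == "__main__":
    assert manhattan_distance([1, 2, 3], [4, 5, 6]) == 9.0
    try:
        manhattan_distance([1, "two"], [3, 4])
    except TypeError as err:
        print(err)  # Expected a list of numbers as input, found str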
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
__lowerCamelCase = ['''gpt2''']
__lowerCamelCase = '''gpt2'''
if is_tf_available():
class A__ ( tf.Module ):
def __init__( self , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
super().__init__()
A_ = tokenizer
A_ = AutoConfig.from_pretrained(__UpperCamelCase )
A_ = TFGPTaLMHeadModel.from_config(__UpperCamelCase )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="""text""" ),) )
def snake_case_ ( self , UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = self.tokenizer(__UpperCamelCase )
A_ = tokenized["""input_ids"""].to_tensor()
A_ = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
A_ = self.model(input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase )["""logits"""]
return outputs
@require_tf
@require_keras_nlp
class A__ ( unittest.TestCase ):
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
A_ = [GPTaTokenizer.from_pretrained(__UpperCamelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
A_ = [TFGPTaTokenizer.from_pretrained(__UpperCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
A_ = [
"""This is a straightforward English test sentence.""",
"""This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
"""Now we're going to add some Chinese: 一 二 三 一二三""",
"""And some much more rare Chinese: 齉 堃 齉堃""",
"""Je vais aussi écrire en français pour tester les accents""",
"""Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
]
A_ = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def snake_case_ ( self ) -> int:
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
A_ = tokenizer([test_inputs] , return_tensors="""tf""" )
A_ = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
A_ = python_outputs[key].numpy()
A_ = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(__UpperCamelCase , tf.intaa ) == tf_outputs_values ) )
@slow
    def test_graph_mode( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer )
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs )
                compiled_outputs = compiled_tokenizer(test_inputs )
                eager_outputs = tf_tokenizer(test_inputs )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
    def test_saved_model( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer )
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = model.serving(test_inputs )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir ) / """saved.model"""
                tf.saved_model.save(model , save_path , signatures={"""serving_default""": model.serving} )
                loaded_model = tf.saved_model.load(save_path )
                loaded_output = loaded_model.signatures["""serving_default"""](test_inputs )["""output_0"""]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
    def test_from_config( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = tf_tokenizer(test_inputs )  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config )
            from_config_output = model_from_config(test_inputs )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
    def test_padding( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
            tf_tokenizer.pad_token_id = 123123
            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
                out = tf_tokenizer(test_inputs , max_length=max_length )
A_ = out["""input_ids"""].numpy().shape[1]
assert out_length == max_length
| 719 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class BeitFeatureExtractor( BeitImageProcessor ):
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> None:
'''simple docstring'''
        warnings.warn(
            """The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use BeitImageProcessor instead.""" , FutureWarning , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
| 667 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
__lowerCamelCase = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type ):
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer, attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model, hf_model, is_headless ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == """group""", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""", layer_index )
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = """weight"""
                    elif "running_mean" in name:
                        weight_type = """running_mean"""
                    elif "inv_freq" in name:
                        weight_type = """inv_freq"""
                    elif "running_var" in name:
                        weight_type = """running_var"""
                    elif "num_batches_tracked" in name:
                        weight_type = """num_batches_tracked"""
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm ):
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True ):
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path, hidden_act="""swish""" )
    else:
        config = WavaVecaConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = """rotary"""
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path, """vocab.json""" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["""<pad>"""] = 0
            vocab_dict["""<s>"""] = 1
            with open(vocab_path, """w""", encoding="""utf-8""" ) as vocab_handle:
                json.dump(vocab_dict, vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="""|""", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == """layer""" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1, sampling_rate=1_60_00, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = WavaVecaConformerForCTC(config )
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config )
    if is_finetuned:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task="""audio_pretraining""" )
        task = fairseq.tasks.setup_task(task_arg )
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task )
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec, not is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__lowerCamelCase = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
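# Illustrative invocation (paths are placeholders, not from the source):
#
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --pytorch_dump_folder_path ./converted-model \
#       --dict_path /path/to/dict.ltr.txt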
| 720 |
'''simple docstring'''
def UpperCAmelCase__ ( num ) -> bool:
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
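# Illustrative checks (assuming the palindrome predicate above):
#
#   UpperCAmelCase__(121)   ->  True
#   UpperCAmelCase__(-121)  ->  False
#   UpperCAmelCase__(10)    ->  False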
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data ) -> tuple:
    return (data["data"], data["target"])
def xgboost(features, target ) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target )
    return classifier
def main() -> None:
    iris = load_iris()
    features, targets = data_handling(iris )
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25 )
    names = iris["""target_names"""]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train )
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="""Blues""", normalize="""true""", )
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 721 |
'''simple docstring'''
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}
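# Reference for the underlying recurrence a(n) = a(n-1) + digit_sum(a(n-1)),
# starting from a(1) = 1 (a naive generator, independent of the fast solver below):
#
#   def _naive_terms(count):
#       term, terms = 1, []
#       for _ in range(count):
#           terms.append(term)
#           term += sum(int(digit) for digit in str(term))
#       return terms
#
#   _naive_terms(8)  ->  [1, 2, 4, 8, 16, 23, 28, 38]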
def next_term(a_i, k, i, n ):
    ds_b = sum(a_i[j] for j in range(k, len(a_i ) ) )
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i ), k ) ) )
    diff , dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b )
    if sub_memo is not None:
        jumps = sub_memo.get(c )
        if jumps is not None and len(jumps ) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps ) - 1, -1, -1 ):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff , dn , _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i ) ) ):
                    new_c , a_i[j] = divmod(new_c, 10 )
                if new_c > 0:
                    add(a_i, k, new_c )
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff , terms_jumped = next_term(a_i, k - 1, i + dn, n )
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff , terms_jumped = compute(a_i, k, i + dn, n )
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps ):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k) )
    return (diff, dn)
def compute(a_i, k, i, n ):
    if i >= n:
        return 0, i
    if k > len(a_i ):
        a_i.extend([0 for _ in range(k - len(a_i ) )] )
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b , ds_c , diff = 0, 0, 0
    for j in range(len(a_i ) ):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k ):
            s = a_i[j] + addend
            addend , a_i[j] = divmod(s, 10 )
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(a_i, k, addend )
    return diff, i - start_i
def add(digits, k, addend ):
    for j in range(k, len(digits ) ):
        s = digits[j] + addend
        if s >= 10:
            quotient , digits[j] = divmod(s, 10 )
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    while addend > 0:
        addend , digit = divmod(addend, 10 )
        digits.append(digit )
def solution(n = 10**15 ) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff , terms_jumped = next_term(digits, 20, i + dn, n )
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits ) ):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class A__ ( PreTrainedModel , BackboneMixin ):
    main_input_name = '''pixel_values'''
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig
    def __init__( self , config , **kwargs ):
        '''simple docstring'''
        requires_backends(self , """timm""" )
        super().__init__(config )
        self.config = config
        if config.backbone is None:
            raise ValueError("""backbone is not set in the config. Please set it to a timm model name.""" )
        if config.backbone not in timm.list_models():
            raise ValueError(f'''backbone {config.backbone} is not supported by timm.''' )
        if hasattr(config , """out_features""" ) and config.out_features is not None:
            raise ValueError("""out_features is not supported by TimmBackbone. Please use out_indices instead.""" )
        pretrained = getattr(config , """use_pretrained_backbone""" , None )
        if pretrained is None:
            raise ValueError("""use_pretrained_backbone is not set in the config. Please set it to True or False.""" )
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config , """out_indices""" , None ) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone , pretrained=pretrained , features_only=config.features_only , in_chans=config.num_channels , out_indices=out_indices , **kwargs , )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["""module"""]: str(i ) for i, layer in enumerate(self._backbone.feature_info.info )}
        super()._init_backbone(config )
@classmethod
    def _from_config( cls , pretrained_model_name_or_path , *model_args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ["""vision""", """timm"""] )
        from ...models.timm_backbone import TimmBackboneConfig
        config = kwargs.pop("""config""" , TimmBackboneConfig() )
        use_timm = kwargs.pop("""use_timm_backbone""" , True )
        if not use_timm:
            raise ValueError("""use_timm_backbone must be True for timm backbones""" )
        num_channels = kwargs.pop("""num_channels""" , config.num_channels )
        features_only = kwargs.pop("""features_only""" , config.features_only )
        use_pretrained_backbone = kwargs.pop("""use_pretrained_backbone""" , config.use_pretrained_backbone )
        out_indices = kwargs.pop("""out_indices""" , config.out_indices )
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path , num_channels=num_channels , features_only=features_only , use_pretrained_backbone=use_pretrained_backbone , out_indices=out_indices , )
        return super()._from_config(config , **kwargs )
    def _init_weights( self , module ):
'''simple docstring'''
pass
    def forward( self , pixel_values , output_attentions=None , output_hidden_states=None , return_dict=None , **kwargs ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("""Cannot output attentions for timm backbones at the moment""" )
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values , **kwargs )
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices )
        else:
            feature_maps = self._backbone(pixel_values , **kwargs )
            hidden_states = None
        feature_maps = tuple(feature_maps )
        hidden_states = tuple(hidden_states ) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps , hidden_states=hidden_states , attentions=None )
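# Hypothetical usage sketch (backbone name and input shape are illustrative):
#
#   config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
#   backbone = A__(config)
#   outputs = backbone(torch.randn(1, 3, 224, 224))
#   feature_maps = outputs.feature_maps  # tuple of torch.Tensor feature maps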
| 700 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class A__ ( tf.keras.layers.Layer ):
    def __init__( self , vocab_size , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        self.out_layers = []
        self.out_projs = []
    def build( self , input_shape ):
        '''simple docstring'''
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=True , name="""cluster_weight""" )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,) , initializer="""zeros""" , trainable=True , name="""cluster_bias""" )
        if self.div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=True , name=f'''out_projs_._{i}''' , )
                    self.out_projs.append(weight )
                else:
                    self.out_projs.append(None )
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=True , name=f'''out_layers_._{i}_._weight''' , )
                bias = self.add_weight(
                    shape=(self.vocab_size,) , initializer="""zeros""" , trainable=True , name=f'''out_layers_._{i}_._bias''' , )
                self.out_layers.append((weight, bias) )
        else:
            for i in range(len(self.cutoffs ) ):
                l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj) , initializer="""zeros""" , trainable=True , name=f'''out_projs_._{i}''' )
                self.out_projs.append(proj )
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=True , name=f'''out_layers_._{i}_._weight''' , )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=True , name=f'''out_layers_._{i}_._bias''' , )
                self.out_layers.append((weight, bias) )
        super().build(input_shape )
@staticmethod
    def _logit( x , W , b , proj=None ):
        '''simple docstring'''
        y = x
        if proj is not None:
            y = tf.einsum("""ibd,ed->ibe""" , y , proj )
        return tf.einsum("""ibd,nd->ibn""" , y , W ) + b
@staticmethod
    def _gather_logprob( logprob , target ):
        '''simple docstring'''
        lp_size = shape_list(logprob )
        r = tf.range(lp_size[0] , dtype=target.dtype )
        idx = tf.stack([r, target] , 1 )
        return tf.gather_nd(logprob , idx )
    def call( self , hidden , target , return_mean=True , training=False ):
        '''simple docstring'''
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target , logits=output )
            out = tf.nn.log_softmax(output , axis=-1 )
        else:
            hidden_sizes = shape_list(hidden )
            out = []
            loss = tf.zeros(hidden_sizes[:2] )
            for i in range(len(self.cutoffs ) ):
                l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask )
                    cur_target = tf.boolean_mask(target , mask ) - l_idx
                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]
                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight] , 0 )
                    cur_b = tf.concat([cur_b, self.cluster_bias] , 0 )
                    head_logit = self._logit(hidden , cur_W , cur_b , self.out_projs[0] )
                    head_logprob = tf.nn.log_softmax(head_logit )
                    out.append(head_logprob[..., : self.cutoffs[0]] )
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob , mask )
                        cur_logprob = self._gather_logprob(cur_head_logprob , cur_target )
                else:
                    tail_logit = self._logit(hidden , cur_W , cur_b , self.out_projs[i] )
                    tail_logprob = tf.nn.log_softmax(tail_logit )
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i )
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob , mask )
                        cur_tail_logprob = tf.boolean_mask(tail_logprob , mask )
                        cur_logprob = self._gather_logprob(cur_tail_logprob , cur_target )
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx , -cur_logprob , shape_list(loss ) )
            out = tf.concat(out , axis=-1 )
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss )
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss )
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss , name=self.name , aggregation="""mean""" if return_mean else """""" )
        return out
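# Layout note (illustrative): with vocab_size=10 and cutoffs=[4, 8], tokens
# 0-3 form the head "shortlist" and tokens 4-7 / 8-9 form two tail clusters;
# each tail is reached through an extra cluster logit appended to the head
# softmax, which is what the cutoff_ends bookkeeping above encodes.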
| 667 | 0 |
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark(key ):
    def decorator(func ):
        handle = getattr(func, """handle_key""", [] )
        handle += [key]
        setattr(func, """handle_key""", handle )
        return func
    return decorator
def mark_multiple(*keys ):
    def decorator(func ):
        handle = getattr(func, """handle_key""", [] )
        handle += keys
        setattr(func, """handle_key""", handle )
        return func
    return decorator
class KeyHandler( type ):
    def __new__( cls , name , bases , attrs ):
        '''simple docstring'''
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , """key_handler""" ):
            setattr(new_cls , """key_handler""" , {} )
        setattr(new_cls , """handle_input""" , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , """handle_key""" , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
@staticmethod
    def handle_input( cls ):
        '''simple docstring'''
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None
def register(cls ):
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
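# Minimal usage sketch (hypothetical Menu class; assumes KEYMAP defines "up"):
#
#   @register
#   class Menu:
#       @mark(KEYMAP["up"])
#       def move_up(cls):
#           ...
#
# Menu.handle_input() then reads one keypress and dispatches it to the handler
# registered for that key, or returns None if no handler matches.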
| 701 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph, v, visited_forward, visited_backward, cst_fwd, cst_bwd, queue, parent, shortest_distance, ) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf )
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt) )
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source, destination, graph_forward, graph_backward ) -> int:
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source) )
    queue_backward.put((0, destination) )
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _ , v_fwd = queue_forward.get()
        visited_forward.add(v_fwd )
        _ , v_bwd = queue_backward.get()
        visited_backward.add(v_bwd )
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward, cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance, )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward, cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance, )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
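# Illustrative call on the demo graphs above: the best E -> F route is
# E -> G -> F with cost 2 + 1 = 3, beating the four-edge chain through B/C/D:
#
#   bidirectional_dij("E", "F", graph_fwd, graph_bwd)  ->  3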
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class A__ ( unittest.TestCase ):
    def test_flatten_dict( self ):
        '''simple docstring'''
        input_dict = {
"""task_specific_params""": {
"""summarization""": {"""length_penalty""": 1.0, """max_length""": 128, """min_length""": 12, """num_beams""": 4},
"""summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 142, """min_length""": 56, """num_beams""": 4},
"""summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6},
}
}
        expected_dict = {
"""task_specific_params.summarization.length_penalty""": 1.0,
"""task_specific_params.summarization.max_length""": 128,
"""task_specific_params.summarization.min_length""": 12,
"""task_specific_params.summarization.num_beams""": 4,
"""task_specific_params.summarization_cnn.length_penalty""": 2.0,
"""task_specific_params.summarization_cnn.max_length""": 142,
"""task_specific_params.summarization_cnn.min_length""": 56,
"""task_specific_params.summarization_cnn.num_beams""": 4,
"""task_specific_params.summarization_xsum.length_penalty""": 1.0,
"""task_specific_params.summarization_xsum.max_length""": 62,
"""task_specific_params.summarization_xsum.min_length""": 11,
"""task_specific_params.summarization_xsum.num_beams""": 6,
}
        self.assertEqual(flatten_dict(input_dict ) , expected_dict )
    def test_transpose_numpy( self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(transpose(x ) , x.transpose() ) )
        x = np.random.randn(3 , 4 , 5 )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
    def test_transpose_torch( self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(transpose(x ) , transpose(t ).numpy() ) )
        x = np.random.randn(3 , 4 , 5 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , transpose(t , axes=(1, 2, 0) ).numpy() ) )
@require_tf
    def test_transpose_tf( self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(transpose(x ) , transpose(t ).numpy() ) )
        x = np.random.randn(3 , 4 , 5 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , transpose(t , axes=(1, 2, 0) ).numpy() ) )
@require_flax
    def test_transpose_flax( self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(transpose(x ) , np.asarray(transpose(t ) ) ) )
        x = np.random.randn(3 , 4 , 5 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , np.asarray(transpose(t , axes=(1, 2, 0) ) ) ) )
    def test_reshape_numpy( self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(reshape(x , (4, 3) ) , np.reshape(x , (4, 3) ) ) )
        x = np.random.randn(3 , 4 , 5 )
        self.assertTrue(np.allclose(reshape(x , (12, 5) ) , np.reshape(x , (12, 5) ) ) )
@require_torch
    def test_reshape_torch( self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(reshape(x , (4, 3) ) , reshape(t , (4, 3) ).numpy() ) )
        x = np.random.randn(3 , 4 , 5 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(reshape(x , (12, 5) ) , reshape(t , (12, 5) ).numpy() ) )
@require_tf
    def test_reshape_tf( self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(reshape(x , (4, 3) ) , reshape(t , (4, 3) ).numpy() ) )
        x = np.random.randn(3 , 4 , 5 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(reshape(x , (12, 5) ) , reshape(t , (12, 5) ).numpy() ) )
@require_flax
    def test_reshape_flax( self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(reshape(x , (4, 3) ) , np.asarray(reshape(t , (4, 3) ) ) ) )
        x = np.random.randn(3 , 4 , 5 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(reshape(x , (12, 5) ) , np.asarray(reshape(t , (12, 5) ) ) ) )
    def test_squeeze_numpy( self ):
        '''simple docstring'''
        x = np.random.randn(1 , 3 , 4 )
        self.assertTrue(np.allclose(squeeze(x ) , np.squeeze(x ) ) )
        x = np.random.randn(1 , 4 , 1 , 5 )
        self.assertTrue(np.allclose(squeeze(x , axis=2 ) , np.squeeze(x , axis=2 ) ) )
@require_torch
    def test_squeeze_torch( self ):
        '''simple docstring'''
        x = np.random.randn(1 , 3 , 4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(squeeze(x ) , squeeze(t ).numpy() ) )
        x = np.random.randn(1 , 4 , 1 , 5 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(squeeze(x , axis=2 ) , squeeze(t , axis=2 ).numpy() ) )
@require_tf
    def test_squeeze_tf( self ):
        '''simple docstring'''
        x = np.random.randn(1 , 3 , 4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(squeeze(x ) , squeeze(t ).numpy() ) )
        x = np.random.randn(1 , 4 , 1 , 5 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(squeeze(x , axis=2 ) , squeeze(t , axis=2 ).numpy() ) )
@require_flax
    def test_squeeze_flax( self ):
        '''simple docstring'''
        x = np.random.randn(1 , 3 , 4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(squeeze(x ) , np.asarray(squeeze(t ) ) ) )
        x = np.random.randn(1 , 4 , 1 , 5 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(squeeze(x , axis=2 ) , np.asarray(squeeze(t , axis=2 ) ) ) )
    def test_expand_dims_numpy( self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(expand_dims(x , axis=1 ) , np.expand_dims(x , axis=1 ) ) )
@require_torch
    def test_expand_dims_torch( self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(expand_dims(x , axis=1 ) , expand_dims(t , axis=1 ).numpy() ) )
@require_tf
    def test_expand_dims_tf( self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(expand_dims(x , axis=1 ) , expand_dims(t , axis=1 ).numpy() ) )
@require_flax
    def test_expand_dims_flax( self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(expand_dims(x , axis=1 ) , np.asarray(expand_dims(t , axis=1 ) ) ) )
| 702 |
'''simple docstring'''
import os
__lowerCamelCase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
def parse_roman_numerals(numerals ) -> int:
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def generate_roman_numerals(num ) -> str:
    numerals = """"""
    m_count = num // 10_00
numerals += m_count * "M"
num %= 10_00
    c_count = num // 1_00
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def solution(roman_numerals_filename = """/p089_roman.txt""" ) -> int:
    savings = 0
    with open(os.path.dirname(__file__ ) + roman_numerals_filename ) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original )
        shorter = generate_roman_numerals(num )
        savings += len(original ) - len(shorter )
    return savings
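# Illustrative round trip (assuming the helpers above):
#
#   parse_roman_numerals("XIIII")  ->  14
#   generate_roman_numerals(14)    ->  'XIV'
#
# i.e. rewriting "XIIII" in minimal form saves two characters.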
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"""wmt19-ru-en""": {"""length_penalty""": 1.1},
"""wmt19-en-ru""": {"""length_penalty""": 1.15},
"""wmt19-en-de""": {"""length_penalty""": 1.0},
"""wmt19-de-en""": {"""length_penalty""": 1.1},
# allenai:
"""wmt16-en-de-dist-12-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-dist-6-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-12-1""": {"""length_penalty""": 0.8},
"""wmt19-de-en-6-6-base""": {"""length_penalty""": 0.6},
"""wmt19-de-en-6-6-big""": {"""length_penalty""": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = """facebook"""
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = """allenai"""
def rewrite_dict_keys(d ):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    da = dict((re.sub(r"""@@$""", """""", k ), v) if k.endswith("""@@""" ) else (re.sub(r"""$""", """</w>""", k ), v) for k, v in d.items() )
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del da[F'''{k}</w>''']
        da[k] = d[k]  # restore
    return da
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path ):
    # prep
    assert os.path.exists(fsmt_checkpoint_path )
    os.makedirs(pytorch_dump_folder_path, exist_ok=True )
    print(F'''Writing results to {pytorch_dump_folder_path}''' )
    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path )
    fsmt_folder_path = dirname(fsmt_checkpoint_path )
    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(F'''using checkpoint {checkpoint_file}''' )
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs )
    args = vars(chkpt["""args"""]["""model"""] )
    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]
    data_root = dirname(pytorch_dump_folder_path )
    model_dir = basename(pytorch_dump_folder_path )
# dicts
    src_dict_file = os.path.join(fsmt_folder_path, F'''dict.{src_lang}.txt''' )
    tgt_dict_file = os.path.join(fsmt_folder_path, F'''dict.{tgt_lang}.txt''' )
    src_dict = Dictionary.load(src_dict_file )
    src_vocab = rewrite_dict_keys(src_dict.indices )
    src_vocab_size = len(src_vocab )
    src_vocab_file = os.path.join(pytorch_dump_folder_path, """vocab-src.json""" )
    print(F'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
    with open(src_vocab_file, """w""", encoding="""utf-8""" ) as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break
    tgt_dict = Dictionary.load(tgt_dict_file )
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices )
    tgt_vocab_size = len(tgt_vocab )
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, """vocab-tgt.json""" )
    print(F'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
    with open(tgt_vocab_file, """w""", encoding="""utf-8""" ) as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent ) )
# merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["""merges_file"""] )
    for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn )
        if os.path.exists(fsmt_merges_file ):
            break
    with open(fsmt_merges_file, encoding="""utf-8""" ) as fin:
        merges = fin.read()
    merges = re.sub(r""" \d+$""", """""", merges, 0, re.M ) # remove frequency number
    print(F'''Generating {merges_file}''' )
    with open(merges_file, """w""", encoding="""utf-8""" ) as fout:
        fout.write(merges )
# model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, """config.json""" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F'''need to extend tokenizer to support bpe={args["bpe"]}'''
assert args["tokenizer"] == "moses", F'''need to extend tokenizer to support bpe={args["tokenizer"]}'''
    model_conf = {
"architectures": ["FSMTForConditionalGeneration"],
"model_type": "fsmt",
"activation_dropout": args["activation_dropout"],
"activation_function": "relu",
"attention_dropout": args["attention_dropout"],
"d_model": args["decoder_embed_dim"],
"dropout": args["dropout"],
"init_std": 0.02,
"max_position_embeddings": args["max_source_positions"],
"num_hidden_layers": args["encoder_layers"],
"src_vocab_size": src_vocab_size,
"tgt_vocab_size": tgt_vocab_size,
"langs": [src_lang, tgt_lang],
"encoder_attention_heads": args["encoder_attention_heads"],
"encoder_ffn_dim": args["encoder_ffn_embed_dim"],
"encoder_layerdrop": args["encoder_layerdrop"],
"encoder_layers": args["encoder_layers"],
"decoder_attention_heads": args["decoder_attention_heads"],
"decoder_ffn_dim": args["decoder_ffn_embed_dim"],
"decoder_layerdrop": args["decoder_layerdrop"],
"decoder_layers": args["decoder_layers"],
"bos_token_id": 0,
"pad_token_id": 1,
"eos_token_id": 2,
"is_encoder_decoder": True,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_all_embeddings"],
}
# good hparam defaults to start with
    model_conf["""num_beams"""] = 5
    model_conf["""early_stopping"""] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["""length_penalty"""] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["""length_penalty"""] = 1.0
    print(F'''Generating {fsmt_model_config_file}''' )
    with open(fsmt_model_config_file, """w""", encoding="""utf-8""" ) as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent ) )
# tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE )
    tokenizer_conf = {
"langs": [src_lang, tgt_lang],
"model_max_length": 10_24,
"do_lower_case": do_lower_case,
}
    print(F'''Generating {fsmt_tokenizer_config_file}''' )
    with open(fsmt_tokenizer_config_file, """w""", encoding="""utf-8""" ) as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent ) )
# model
    model = chkpt["""models"""][0]
    model_state_dict = model.state_dict()
    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("""model.""" + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
    ignore_keys = [
"model.model",
"model.encoder.version",
"model.decoder.version",
"model.encoder_embed_tokens.weight",
"model.decoder_embed_tokens.weight",
"model.encoder.embed_positions._float_tensor",
"model.decoder.embed_positions._float_tensor",
]
    for k in ignore_keys:
        model_state_dict.pop(k, None )
    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path )
    model_new = FSMTForConditionalGeneration(config )
    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False )
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME )
    print(F'''Generating {pytorch_weights_dump_path}''' )
    torch.save(model_state_dict, pytorch_weights_dump_path )
print("""Conversion is done!""" )
print("""\nLast step is to upload the files to s3""" )
print(F'''cd {data_root}''' )
print(F'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fsmt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__lowerCamelCase = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
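# Illustrative invocation (paths are placeholders, not from the source):
#
#   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#       --fsmt_checkpoint_path /path/to/wmt19.ru-en/model.pt \
#       --pytorch_dump_folder_path ./wmt19-ru-en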
| 703 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
| 667 | 0 |
'''simple docstring'''
def is_pentagonal(n ) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
def solution(limit = 50_00 ) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit )]
    for i, pentagonal_i in enumerate(pentagonal_nums ):
        for j in range(i, len(pentagonal_nums ) ):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a ) and is_pentagonal(b ):
                return b
    return -1
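# For orientation: the first pentagonal numbers P(n) = n * (3n - 1) / 2 are
# 1, 5, 12, 22, 35, ...; the solver scans pairs whose sum and difference are
# both pentagonal and returns that minimal difference.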
if __name__ == "__main__":
print(f"""{solution() = }""")
| 704 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_ ):
    return EnvironmentCommand()
def download_command_factory(args ):
    return EnvironmentCommand(args.accelerate_config_file )
class A__ ( BaseTransformersCLICommand ):
@staticmethod
    def register_subcommand( parser ):
        '''simple docstring'''
        download_parser = parser.add_parser("""env""" )
        download_parser.set_defaults(func=info_command_factory )
        download_parser.add_argument(
            """--accelerate-config_file""" , default=None , help="""The accelerate config file to use for the default values in the launching script.""" , )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self , accelerate_config_file , *args ) -> None:
        '''simple docstring'''
        self._accelerate_config_file = accelerate_config_file
    def run( self ):
        '''simple docstring'''
        safetensors_version = """not installed"""
        if is_safetensors_available():
            import safetensors
            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("""safetensors""" ) is not None:
            import safetensors
            safetensors_version = f'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
        accelerate_version = """not installed"""
        accelerate_config = accelerate_config_str = """not found"""
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file
            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file ):
                accelerate_config = load_config_from_file(self._accelerate_config_file ).to_dict()
            accelerate_config_str = (
                """\n""".join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
                if isinstance(accelerate_config , dict )
                else f'''\t{accelerate_config}'''
            )
        pt_version = """not installed"""
        pt_cuda_available = """NA"""
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        tf_version = """not installed"""
        tf_cuda_available = """NA"""
        if is_tf_available():
            import tensorflow as tf
            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("""GPU""" ) )
        flax_version = """not installed"""
        jax_version = """not installed"""
        jaxlib_version = """not installed"""
        jax_backend = """NA"""
        if is_flax_available():
            import flax
            import jax
            import jaxlib
            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform
        info = {
            """`transformers` version""": version,
            """Platform""": platform.platform(),
            """Python version""": platform.python_version(),
            """Huggingface_hub version""": huggingface_hub.__version__,
            """Safetensors version""": f'''{safetensors_version}''',
            """Accelerate version""": f'''{accelerate_version}''',
            """Accelerate config""": f'''{accelerate_config_str}''',
            """PyTorch version (GPU?)""": f'''{pt_version} ({pt_cuda_available})''',
            """Tensorflow version (GPU?)""": f'''{tf_version} ({tf_cuda_available})''',
            """Flax version (CPU?/GPU?/TPU?)""": f'''{flax_version} ({jax_backend})''',
            """Jax version""": f'''{jax_version}''',
            """JaxLib version""": f'''{jaxlib_version}''',
            """Using GPU in script?""": """<fill in>""",
            """Using distributed or parallel set-up in script?""": """<fill in>""",
        }
        print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
        print(self.format_dict(info ) )
        return info
@staticmethod
    def format_dict( d ):
        '''simple docstring'''
        return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 667 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    def __init__( self , parent , batch_size=2 , is_training=True , use_auxiliary_loss=False , num_queries=10 , num_channels=3 , min_size=32 * 8 , max_size=32 * 8 , num_labels=4 , hidden_dim=64 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs( self ):
'''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            torch_device )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] , device=torch_device )
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=torch_device ) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels) , device=torch_device ) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config( self ):
'''simple docstring'''
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim , )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common( self ):
'''simple docstring'''
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state( self , output , config ):
        '''simple docstring'''
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(pixel_decoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(transformer_decoder_hidden_states ) , config.decoder_layers )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ) -> Optional[Any]:
'''simple docstring'''
with torch.no_grad():
A_ = MaskaFormerModel(config=_a )
model.to(_a )
model.eval()
A_ = model(pixel_values=_a , pixel_mask=_a )
A_ = model(_a , output_hidden_states=_a )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_a , _a )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
A_ = MaskaFormerForUniversalSegmentation(config=_a )
model.to(_a )
model.eval()
def comm_check_on_output(UpperCamelCase__ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
A_ = model(pixel_values=_a , pixel_mask=_a )
A_ = model(_a )
comm_check_on_output(_a )
A_ = model(
pixel_values=_a , pixel_mask=_a , mask_labels=_a , class_labels=_a )
comm_check_on_output(_a )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowercase = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = MaskaFormerModelTester(self )
A_ = ConfigTester(self , config_class=_a , has_text_modality=_a )
def snake_case_ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_a , **_a , output_hidden_states=_a )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_a )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def snake_case_ ( self ) -> int:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def snake_case_ ( self ) -> str:
'''simple docstring'''
pass
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_a )
A_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
@slow
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
A_ = MaskaFormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = (self.model_tester.min_size,) * 2
A_ = {
"""pixel_values""": torch.randn((2, 3, *size) , device=_a ),
"""mask_labels""": torch.randn((2, 10, *size) , device=_a ),
"""class_labels""": torch.zeros(2 , 10 , device=_a ).long(),
}
A_ = self.model_tester.get_config()
A_ = MaskaFormerForUniversalSegmentation(_a ).to(_a )
A_ = model(**_a )
self.assertTrue(outputs.loss is not None )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_a , **_a , output_hidden_states=_a )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_a ).to(_a )
A_ = model(**_a , output_attentions=_a )
self.assertTrue(outputs.attentions is not None )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
A_ = self.all_model_classes[1]
A_ = self.model_tester.prepare_config_and_inputs()
A_ = model_class(_a )
model.to(_a )
model.train()
A_ = model(_a , mask_labels=_a , class_labels=_a ).loss
loss.backward()
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = self.all_model_classes[1]
A_ = self.model_tester.prepare_config_and_inputs()
A_ = True
A_ = True
A_ = model_class(_a ).to(_a )
model.train()
A_ = model(_a , mask_labels=_a , class_labels=_a )
A_ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
A_ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
A_ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
A_ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_a )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__lowerCamelCase = 1e-4
def UpperCAmelCase__ ( ) -> Optional[Any]:
A_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class A__ ( unittest.TestCase ):
@cached_property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def snake_case_ ( self ) -> Any:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_a )
A_ = self.default_image_processor
A_ = prepare_img()
A_ = image_processor(_a , return_tensors="""pt""" ).to(_a )
A_ = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a , (1, 3, 384, 384) )
with torch.no_grad():
A_ = model(**_a )
A_ = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _a , atol=_a ) )
A_ = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _a , atol=_a ) )
A_ = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _a , atol=_a ) )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
A_ = self.default_image_processor
A_ = prepare_img()
A_ = image_processor(_a , return_tensors="""pt""" ).to(_a )
A_ = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a , (1, 3, 384, 384) )
with torch.no_grad():
A_ = model(**_a )
# masks_queries_logits
A_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
A_ = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
A_ = torch.tensor(_a ).to(_a )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _a , atol=_a ) )
# class_queries_logits
A_ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
A_ = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(_a )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _a , atol=_a ) )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
A_ = self.default_image_processor
A_ = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , )
A_ = inputs["""pixel_values"""].to(_a )
A_ = [el.to(_a ) for el in inputs["""mask_labels"""]]
A_ = [el.to(_a ) for el in inputs["""class_labels"""]]
with torch.no_grad():
A_ = model(**_a )
self.assertTrue(outputs.loss is not None )
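# Hedged shape sketch (illustrative; not part of the test file): as the comments
# above note, Mask2Former predicts mask logits at 1/4 of the padded input
# resolution, so a 384x384 input with 100 queries gives (1, 100, 96, 96) logits.
def _expected_mask_logits_shape(batch_size, num_queries, height, width):
    # masks are predicted on a feature map downsampled by a factor of 4
    return (batch_size, num_queries, height // 4, width // 4)
assert _expected_mask_logits_shape(1, 100, 384, 384) == (1, 100, 96, 96)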
| 705 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( _snake_case , unittest.TestCase ):
lowercase = KandinskyVaaPriorPipeline
lowercase = ["prompt"]
lowercase = ["prompt", "negative_prompt"]
lowercase = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
lowercase = False
@property
def snake_case_ ( self ) -> Any:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
return self.time_input_dim
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
return 100
@property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
A_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(UpperCamelCase__ )
@property
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
A_ = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 12,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
A_ = PriorTransformer(**UpperCamelCase__ )
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it doesn't return 0
A_ = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
A_ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
A_ = CLIPVisionModelWithProjection(UpperCamelCase__ )
return model
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = CLIPImageProcessor(
crop_size=224 , do_center_crop=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_resize=UpperCamelCase__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.dummy_prior
A_ = self.dummy_image_encoder
A_ = self.dummy_text_encoder
A_ = self.dummy_tokenizer
A_ = self.dummy_image_processor
A_ = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=UpperCamelCase__ , clip_sample_range=10.0 , )
A_ = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=0 ) -> Optional[int]:
'''simple docstring'''
if str(UpperCamelCase__ ).startswith("""mps""" ):
A_ = torch.manual_seed(UpperCamelCase__ )
else:
A_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
A_ = {
"""prompt""": """horse""",
"""generator""": generator,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = """cpu"""
A_ = self.get_dummy_components()
A_ = self.pipeline_class(**UpperCamelCase__ )
A_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
A_ = output.image_embeds
A_ = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0]
A_ = image[0, -10:]
A_ = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
A_ = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = torch_device == """cpu"""
A_ = True
A_ = False
self._test_inference_batch_single_identical(
test_max_difference=UpperCamelCase__ , relax_max_difference=UpperCamelCase__ , test_mean_pixel_difference=UpperCamelCase__ , )
@skip_mps
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = torch_device == """cpu"""
A_ = False
self._test_attention_slicing_forward_pass(
test_max_difference=UpperCamelCase__ , test_mean_pixel_difference=UpperCamelCase__ , )
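# Hedged sketch (assumption: the prior pipeline applies standard classifier-free
# guidance, which is what the guidance_scale=4.0 input above feeds into; this
# helper is illustrative and not part of the pipeline code).
def _apply_cfg(uncond_pred, text_pred, guidance_scale=4.0):
    # standard CFG blend: uncond + scale * (cond - uncond)
    return uncond_pred + guidance_scale * (text_pred - uncond_pred)
assert torch.allclose(
    _apply_cfg(torch.zeros(2, 32), torch.ones(2, 32)), torch.full((2, 32), 4.0)
)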
| 667 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=32 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=[10, 20, 30, 40] , UpperCamelCase__=[2, 2, 3, 2] , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=10 , UpperCamelCase__=0.02 , UpperCamelCase__=["stage2", "stage3", "stage4"] , UpperCamelCase__=[2, 3, 4] , UpperCamelCase__=None , ) -> Union[str, Any]:
'''simple docstring'''
A_ = parent
A_ = batch_size
A_ = image_size
A_ = num_channels
A_ = num_stages
A_ = hidden_sizes
A_ = depths
A_ = is_training
A_ = use_labels
A_ = intermediate_size
A_ = hidden_act
A_ = num_labels
A_ = initializer_range
A_ = out_features
A_ = out_indices
A_ = scope
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.num_labels )
A_ = self.get_config()
return config, pixel_values, labels
def snake_case_ ( self ) -> str:
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
A_ = ConvNextVaModel(config=a_ )
model.to(a_ )
model.eval()
A_ = model(a_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
A_ = ConvNextVaForImageClassification(a_ )
model.to(a_ )
model.eval()
A_ = model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
A_ = model(a_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
A_ = None
A_ = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
A_ = model(a_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = self.prepare_config_and_inputs()
A_ = config_and_inputs
A_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = self.prepare_config_and_inputs()
A_ = config_and_inputs
A_ = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class A__ ( __a , __a , unittest.TestCase ):
lowercase = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = ConvNextVaModelTester(self )
A_ = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case_ ( self ) -> str:
'''simple docstring'''
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def snake_case_ ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def snake_case_ ( self ) -> str:
'''simple docstring'''
pass
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
A_ = self.model_tester.prepare_config_and_inputs_with_labels()
A_ = True
if model_class.__name__ in [
*get_values(a_ ),
*get_values(a_ ),
]:
continue
A_ = model_class(a_ )
model.to(a_ )
model.train()
A_ = self._prepare_for_class(a_ , a_ , return_labels=a_ )
A_ = model(**a_ ).loss
loss.backward()
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
A_ = self.model_tester.prepare_config_and_inputs_with_labels()
A_ = False
A_ = True
if (
model_class.__name__
in [*get_values(a_ ), *get_values(a_ )]
or not model_class.supports_gradient_checkpointing
):
continue
A_ = model_class(a_ )
model.to(a_ )
model.gradient_checkpointing_enable()
model.train()
A_ = self._prepare_for_class(a_ , a_ , return_labels=a_ )
A_ = model(**a_ ).loss
loss.backward()
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(a_ )
A_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , a_ )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
def check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A_ = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
A_ = model(**self._prepare_for_class(a_ , a_ ) )
A_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ = self.model_tester.num_stages
self.assertEqual(len(a_ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = True
check_hidden_states_output(a_ , a_ , a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ = True
check_hidden_states_output(a_ , a_ , a_ )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
@slow
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = ConvNextVaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def UpperCAmelCase__ ( ) -> List[str]:
A_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def snake_case_ ( self ) -> Any:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(a_ )
A_ = self.default_image_processor
A_ = prepare_img()
A_ = preprocessor(images=a_ , return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
A_ = model(**a_ )
# verify the logits
A_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a_ )
A_ = torch.tensor([0.9996, 0.1966, -0.4386] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1e-4 ) )
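# Hedged shape sketch (illustrative; not part of the test file): ConvNeXt-style
# backbones downsample by 4 in the patchify stem and by 2 in each of the three
# later stages, matching the H // 32, W // 32 hidden-state check above.
def _convnext_final_feature_hw(height, width):
    # stem /4, then three stride-2 downsampling layers -> overall /32
    return (height // 32, width // 32)
assert _convnext_final_feature_hw(32, 32) == (1, 1)  # the tester uses image_size=32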
| 706 |
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( _snake_case ):
lowercase = (IPNDMScheduler,)
lowercase = (("num_inference_steps", 50),)
def snake_case_ ( self , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = {"""num_train_timesteps""": 1000}
config.update(**UpperCamelCase__ )
return config
def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config(**UpperCamelCase__ )
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
A_ = scheduler_class.from_pretrained(UpperCamelCase__ )
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
A_ = scheduler_class.from_pretrained(UpperCamelCase__ )
                new_scheduler.set_timesteps(UpperCamelCase__ )
                # copy over dummy past residuals (must be after setting timesteps)
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(**UpperCamelCase__ )
A_ = scheduler_class(**UpperCamelCase__ )
A_ = 10
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
return sample
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase__ , """set_timesteps""" ):
scheduler.set_timesteps(UpperCamelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , """set_timesteps""" ):
A_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A_ = dummy_past_residuals[:]
A_ = scheduler.timesteps[5]
A_ = scheduler.timesteps[6]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = self.full_loop()
A_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 2540529 ) < 10
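# Hedged usage sketch (assumes the public diffusers config API exercised above;
# not part of the test suite): the save/load round trip in miniature, wrapped in
# a function so it does not run at import time.
def _roundtrip_ipndm_config():
    scheduler = IPNDMScheduler(num_train_timesteps=1000)
    with tempfile.TemporaryDirectory() as tmpdirname:
        scheduler.save_config(tmpdirname)
        reloaded = IPNDMScheduler.from_pretrained(tmpdirname)
    assert reloaded.config.num_train_timesteps == 1000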
| 667 | 0 |
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
__lowerCamelCase = datasets.logging.get_logger(__name__)
__lowerCamelCase = '''\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n'''
__lowerCamelCase = '''\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'''
__lowerCamelCase = '''\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'''
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__=False, UpperCAmelCase__=False, UpperCAmelCase__=True, UpperCAmelCase__=False, UpperCAmelCase__="dummy_doc" ) -> Union[str, Any]:
A_ = {doc: key_lines}
A_ = {doc: sys_lines}
A_ = {}
A_ = 0
A_ = 0
A_ = 0
A_ = 0
A_ = 0
A_ = 0
A_ , A_ = reader.get_doc_mentions(__snake_case, key_doc_lines[doc], __snake_case )
key_singletons_num += singletons_num
if NP_only or min_span:
A_ = reader.set_annotated_parse_trees(__snake_case, key_doc_lines[doc], __snake_case, __snake_case )
A_ , A_ = reader.get_doc_mentions(__snake_case, sys_doc_lines[doc], __snake_case )
sys_singletons_num += singletons_num
if NP_only or min_span:
A_ = reader.set_annotated_parse_trees(__snake_case, key_doc_lines[doc], __snake_case, __snake_case )
if remove_nested:
A_ , A_ = reader.remove_nested_coref_mentions(__snake_case, __snake_case )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
A_ , A_ = reader.remove_nested_coref_mentions(__snake_case, __snake_case )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
A_ = reader.get_mention_assignments(__snake_case, __snake_case )
A_ = reader.get_mention_assignments(__snake_case, __snake_case )
A_ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' )
logger.info(
"""Number of resulting singleton clusters in the key """
F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' )
if not keep_singletons:
logger.info(
F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
"""files, respectively""" )
return doc_coref_infos
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> str:
A_ = get_coref_infos(__snake_case, __snake_case, __snake_case, __snake_case, __snake_case, __snake_case )
A_ = {}
A_ = 0
A_ = 0
for name, metric in metrics:
A_ , A_ , A_ = evaluator.evaluate_documents(__snake_case, __snake_case, beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa} )
logger.info(
name.ljust(10 ), F'''Recall: {recall * 1_00:.2f}''', F''' Precision: {precision * 1_00:.2f}''', F''' F1: {fa * 1_00:.2f}''', )
if conll_subparts_num == 3:
A_ = (conll / 3) * 1_00
logger.info(F'''CoNLL score: {conll:.2f}''' )
output_scores.update({"""conll_score""": conll} )
return output_scores
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Union[str, Any]:
A_ = False
for line in key_lines:
if not line.startswith("""#""" ):
if len(line.split() ) > 6:
A_ = line.split()[5]
if not parse_col == "-":
A_ = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
def snake_case_ ( self ) -> Any:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=False ) -> Optional[Any]:
'''simple docstring'''
A_ = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
A_ = util.check_gold_parse_annotation(__lowerCAmelCase )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use \'min_span\'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
A_ = evaluate(
key_lines=__lowerCAmelCase , sys_lines=__lowerCAmelCase , metrics=__lowerCAmelCase , NP_only=__lowerCAmelCase , remove_nested=__lowerCAmelCase , keep_singletons=__lowerCAmelCase , min_span=__lowerCAmelCase , )
return score
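# Hedged arithmetic check (standalone; mirrors the computation above): the CoNLL
# score is the mean of the MUC, B-cubed and CEAFe F1 values on a 0-100 scale.
_muc_f1, _bcub_f1, _ceafe_f1 = 0.70, 0.65, 0.60
assert round((_muc_f1 + _bcub_f1 + _ceafe_f1) / 3 * 100, 1) == 65.0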
| 707 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCamelCase = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
    # 1. in HF T5, we have block.{x}.layer.{y}, which corresponds to layers_{x} in
    # the original model
A_ = list(s_dict.keys() )
for key in keys:
A_ = r""".*/layers_(\d+)"""
A_ = key
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.sub(r"""layers_(\d+)""", r"""block/\1/layer""", UpperCAmelCase__ )
A_ = r"""(encoder|decoder)\/"""
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.match(UpperCAmelCase__, UpperCAmelCase__ ).groups()
if groups[0] == "encoder":
A_ = re.sub(r"""/mlp/""", r"""/1/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/1/layer_norm/""", UpperCAmelCase__ )
elif groups[0] == "decoder":
A_ = re.sub(r"""/mlp/""", r"""/2/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/2/layer_norm/""", UpperCAmelCase__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
A_ = new_key.replace(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''{key} -> {new_key}''' )
A_ = s_dict.pop(UpperCAmelCase__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
A_ = s_dict[key].shape[0]
A_ = s_dict[key]
for idx in range(UpperCAmelCase__ ):
A_ = expert_weihts[idx]
                print(F'''{key} -> {key.replace("expert/", f"experts/expert_{idx}/" )}''' )
s_dict.pop(UpperCAmelCase__ )
return s_dict
__lowerCamelCase = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
    # Convert a Google-style config to the Hugging Face format
import regex as re
with open(UpperCAmelCase__, """r""" ) as f:
A_ = f.read()
A_ = re.findall(r"""(.*) = ([0-9.]*)""", UpperCAmelCase__ )
A_ = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
A_ = float(UpperCAmelCase__ ) if """.""" in value else int(UpperCAmelCase__ )
A_ = re.findall(r"""(.*activations) = \(\'(.*)\',\)""", UpperCAmelCase__ )[0]
A_ = str(activation[1] )
A_ = num_experts
A_ = SwitchTransformersConfig(**UpperCAmelCase__ )
return config
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__=None, UpperCAmelCase__="./", UpperCAmelCase__=8 ) -> List[str]:
# Initialise PyTorch model
print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
A_ = checkpoints.load_tax_checkpoint(UpperCAmelCase__ )
if gin_file is not None:
A_ = convert_gin_to_config(UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ = SwitchTransformersConfig.from_pretrained(UpperCAmelCase__ )
A_ = SwitchTransformersForConditionalGeneration(UpperCAmelCase__ )
A_ = flax_params["""target"""]
A_ = flatten_dict(UpperCAmelCase__, sep="""/""" )
A_ = rename_keys(UpperCAmelCase__ )
A_ = unflatten_dict(UpperCAmelCase__, sep="""/""" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCamelCase = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
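# Hedged mini-demo (standalone; mirrors the rename_keys logic above): T5X
# checkpoint keys such as "encoder/layers_3/..." are rewritten to the HF layout
# "encoder/block/3/layer/...".
_key = "encoder/layers_3/attention/query"
assert re.sub(r"layers_(\d+)", r"block/\1/layer", _key) == "encoder/block/3/layer/attention/query"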
| 667 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''roberta''', choices=['''roberta''', '''gpt2'''])
parser.add_argument('''--model_name''', default='''roberta-large''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_roberta_048131723.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
__lowerCamelCase = parser.parse_args()
if args.model_type == "roberta":
__lowerCamelCase = RobertaForMaskedLM.from_pretrained(args.model_name)
__lowerCamelCase = "roberta"
elif args.model_type == "gpt2":
__lowerCamelCase = GPTaLMHeadModel.from_pretrained(args.model_name)
__lowerCamelCase = "transformer"
__lowerCamelCase = model.state_dict()
__lowerCamelCase = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
__lowerCamelCase = state_dict[f"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
__lowerCamelCase = f"""{prefix}.embeddings.{w}.weight"""
__lowerCamelCase = state_dict[param_name]
for w in ["weight", "bias"]:
__lowerCamelCase = f"""{prefix}.embeddings.LayerNorm.{w}"""
__lowerCamelCase = state_dict[param_name]
# Transformer Blocks #
__lowerCamelCase = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
__lowerCamelCase = state_dict[
f"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
]
__lowerCamelCase = state_dict[f"""{prefix}.h.{teacher_idx}.attn.bias"""]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
__lowerCamelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
]
std_idx += 1
    # Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
__lowerCamelCase = state_dict[f"""{layer}"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
__lowerCamelCase = state_dict[f"""lm_head.dense.{w}"""]
__lowerCamelCase = state_dict[f"""lm_head.layer_norm.{w}"""]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
__lowerCamelCase = state_dict[f"""{prefix}.ln_f.{w}"""]
__lowerCamelCase = state_dict["lm_head.weight"]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
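# Hedged illustration (standalone; mirrors the std_idx bookkeeping above): the
# six selected teacher layers are copied into consecutive student positions 0..5.
_teacher_layers = [0, 2, 4, 7, 9, 11]
_mapping = {t: s for s, t in enumerate(_teacher_layers)}
assert _mapping == {0: 0, 2: 1, 4: 2, 7: 3, 9: 4, 11: 5}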
| 708 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
assert (
isinstance(UpperCAmelCase__, UpperCAmelCase__ ) and number_of_steps > 0
), F'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
if number_of_steps == 1:
return 1
A_ , A_ = 1, 1
for _ in range(number_of_steps - 1 ):
A_ , A_ = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
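# Hedged worked example (standalone; mirrors the recurrence above): the number of
# ways to climb n steps taking 1 or 2 at a time follows the Fibonacci sequence.
current, previous = 1, 1
for _ in range(3 - 1):  # n = 3 steps
    current, previous = current + previous, current
assert current == 3  # 1+1+1, 1+2, 2+1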
| 667 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ['''LayoutLMv2TokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ['''LayoutLMv2FeatureExtractor''']
__lowerCamelCase = ['''LayoutLMv2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 709 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
return str(UpperCAmelCase__ ) == str(UpperCAmelCase__ )[::-1]
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
return int(UpperCAmelCase__ ) + int(str(UpperCAmelCase__ )[::-1] )
def UpperCAmelCase__ ( UpperCAmelCase__ = 1_00_00 ) -> int:
A_ = []
for num in range(1, UpperCAmelCase__ ):
A_ = 0
A_ = num
while iterations < 50:
A_ = sum_reverse(UpperCAmelCase__ )
iterations += 1
if is_palindrome(UpperCAmelCase__ ):
break
else:
lychrel_nums.append(UpperCAmelCase__ )
return len(UpperCAmelCase__ )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
_CITATION = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , reference_urls=[] , )
    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 710 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp: int) -> bool:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
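

# Added example: the check above is per code point. "中" (U+4E2D) falls in the
# CJK Unified Ideographs block, while Latin "A" does not.
assert _is_chinese_char(ord("中"))
assert not _is_chinese_char(ord("A"))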
def is_chinese(word: str) -> int:
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]) -> List[str]:
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set) -> List[str]:
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_span = min(end - start, max_word_len)
            for i in range(max_span, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
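

# Added example (uses the names restored above): characters inside a matched
# LTP word are re-marked as WordPiece continuations, which is what whole word
# masking consumes downstream.
assert add_sub_symbol(["中", "国", "人"], {"中国"}) == ["中", "##国", "人"]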
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer) -> List[List[int]]:
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for token_id in input_ids:
            token = bert_tokenizer._convert_id_to_token(token_id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the positions of Chinese subwords that start with ##,
        # meaning they are continuations of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)BERT, the best result is from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiters like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster on a GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
__lowerCamelCase = parser.parse_args()
main(args)
| 667 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each pair of alphas calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
def analyze_text(text: str) -> tuple[Counter, Counter]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have a space at the start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
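

# Added spot-check (uses the names restored above): for the string "ab" the
# counters hold exactly one of each character and each pair.
assert analyze_text("ab") == (Counter({"a": 1, "b": 1}), Counter({" a": 1, "ab": 1}))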
def main() -> None:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 711 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    return compute_nums(1)[0]
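

# Added spot-check: 9 = 7 + 2 * 1**2 satisfies the conjecture, so the search
# above skips it; the published answer to Project Euler 46 is 5777.
assert is_prime(9 - 2 * 1 * 1)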
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 712 |
'''simple docstring'''
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
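

# Added spot-check: both ends close toward the middle, so the scan is O(n)
# and works on unsorted data.
assert search([5, 3, 8, 1], 8) == 2
assert search([5, 3, 8, 1], 7) == -1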
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = '''T5Config'''
class TFMTaModel(TFTaModel):
    model_type = "mt5"
    config_class = MTaConfig


class TFMTaForConditionalGeneration(TFTaForConditionalGeneration):
    model_type = "mt5"
    config_class = MTaConfig


class TFMTaEncoderModel(TFTaEncoderModel):
    model_type = "mt5"
    config_class = MTaConfig
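

# Added usage sketch (hedged; "google/mt5-small" is the public mT5 release):
# the classes only re-point model_type/config_class, so usage matches TF-T5:
#
#   model = TFMTaModel.from_pretrained("google/mt5-small")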
| 713 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        # the failed acquire should have blocked for at least `timeout` seconds
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255

    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
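

# Added test sketch (assumption: the vendored FileLock exposes `is_locked`
# like upstream py-filelock): a zero timeout turns acquire() into a
# non-blocking try-lock.
def test_zero_timeout_is_try_lock(tmpdir):
    lock = FileLock(str(tmpdir / "bar.lock"))
    with lock.acquire(timeout=0):
        assert lock.is_locked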
| 667 | 0 |
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class A__ ( UpperCAmelCase__ ):
lowercase = 42
lowercase = 42
class A__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase = 1
@register_to_config
def __init__( self , UpperCamelCase__ = 2000 , UpperCamelCase__ = 0.15 , UpperCamelCase__ = 0.01 , UpperCamelCase__ = 1348.0 , UpperCamelCase__ = 1e-5 , UpperCamelCase__ = 1 , ) -> Optional[Any]:
'''simple docstring'''
A_ = sigma_max
# setable values
A_ = None
self.set_sigmas(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None ) -> str:
'''simple docstring'''
A_ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
A_ = torch.linspace(1 , UpperCamelCase__ , UpperCamelCase__ , device=UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None ) -> Optional[int]:
'''simple docstring'''
A_ = sigma_min if sigma_min is not None else self.config.sigma_min
A_ = sigma_max if sigma_max is not None else self.config.sigma_max
A_ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(UpperCamelCase__ , UpperCamelCase__ )
A_ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
A_ = torch.exp(torch.linspace(math.log(UpperCamelCase__ ) , math.log(UpperCamelCase__ ) , UpperCamelCase__ ) )
A_ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = True , ) -> Union[SdeVeOutput, Tuple]:
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
A_ = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
A_ = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
A_ = timesteps.to(self.discrete_sigmas.device )
A_ = self.discrete_sigmas[timesteps].to(sample.device )
A_ = self.get_adjacent_sigma(UpperCamelCase__ , UpperCamelCase__ ).to(sample.device )
A_ = torch.zeros_like(UpperCamelCase__ )
A_ = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
A_ = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
A_ = diffusion.unsqueeze(-1 )
A_ = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
A_ = randn_tensor(
sample.shape , layout=sample.layout , generator=UpperCamelCase__ , device=sample.device , dtype=sample.dtype )
A_ = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
A_ = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=UpperCamelCase__ , prev_sample_mean=UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = True , ) -> Union[SchedulerOutput, Tuple]:
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
A_ = randn_tensor(sample.shape , layout=sample.layout , generator=UpperCamelCase__ ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
A_ = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
A_ = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
A_ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
A_ = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
A_ = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
A_ = step_size.unsqueeze(-1 )
A_ = sample + step_size * model_output
A_ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> torch.FloatTensor:
'''simple docstring'''
A_ = timesteps.to(original_samples.device )
A_ = self.discrete_sigmas.to(original_samples.device )[timesteps]
A_ = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(UpperCamelCase__ ) * sigmas[:, None, None, None]
)
A_ = noise + original_samples
return noisy_samples
def __len__( self ) -> Dict:
'''simple docstring'''
return self.config.num_train_timesteps
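

# Added usage sketch (hedged; method and class names follow the upstream
# diffusers ScoreSdeVeScheduler, and `model` is a hypothetical score network
# with signature model(sample, t) -> torch.FloatTensor):
#
#   scheduler = ScoreSdeVeScheduler()
#   scheduler.set_timesteps(num_inference_steps=100)
#   scheduler.set_sigmas(num_inference_steps=100)
#   sample = torch.randn(1, 3, 32, 32) * scheduler.config.sigma_max
#   for t in scheduler.timesteps:
#       for _ in range(scheduler.config.correct_steps):
#           sample = scheduler.step_correct(model(sample, t), sample).prev_sample
#       sample = scheduler.step_pred(model(sample, t), t, sample).prev_sample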
| 714 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class A__ ( _snake_case ):
lowercase = 42
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=3 , UpperCamelCase__=("DownEncoderBlock2D",) , UpperCamelCase__=(64,) , UpperCamelCase__=2 , UpperCamelCase__=32 , UpperCamelCase__="silu" , UpperCamelCase__=True , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
A_ = layers_per_block
A_ = torch.nn.Convad(
UpperCamelCase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
A_ = None
A_ = nn.ModuleList([] )
# down
A_ = block_out_channels[0]
for i, down_block_type in enumerate(UpperCamelCase__ ):
A_ = output_channel
A_ = block_out_channels[i]
A_ = i == len(UpperCamelCase__ ) - 1
A_ = get_down_block(
UpperCamelCase__ , num_layers=self.layers_per_block , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
self.down_blocks.append(UpperCamelCase__ )
# mid
A_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# out
A_ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCamelCase__ , eps=1e-6 )
A_ = nn.SiLU()
A_ = 2 * out_channels if double_z else out_channels
A_ = nn.Convad(block_out_channels[-1] , UpperCamelCase__ , 3 , padding=1 )
A_ = False
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = x
A_ = self.conv_in(UpperCamelCase__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ ):
def custom_forward(*UpperCamelCase__ ):
return module(*UpperCamelCase__ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
for down_block in self.down_blocks:
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ )
# middle
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCamelCase__ )
else:
# down
for down_block in self.down_blocks:
A_ = down_block(UpperCamelCase__ )
# middle
A_ = self.mid_block(UpperCamelCase__ )
# post-process
A_ = self.conv_norm_out(UpperCamelCase__ )
A_ = self.conv_act(UpperCamelCase__ )
A_ = self.conv_out(UpperCamelCase__ )
return sample
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=3 , UpperCamelCase__=("UpDecoderBlock2D",) , UpperCamelCase__=(64,) , UpperCamelCase__=2 , UpperCamelCase__=32 , UpperCamelCase__="silu" , UpperCamelCase__="group" , ) -> List[Any]:
'''simple docstring'''
super().__init__()
A_ = layers_per_block
A_ = nn.Convad(
UpperCamelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
A_ = None
A_ = nn.ModuleList([] )
A_ = in_channels if norm_type == """spatial""" else None
# mid
A_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# up
A_ = list(reversed(UpperCamelCase__ ) )
A_ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCamelCase__ ):
A_ = output_channel
A_ = reversed_block_out_channels[i]
A_ = i == len(UpperCamelCase__ ) - 1
A_ = get_up_block(
UpperCamelCase__ , num_layers=self.layers_per_block + 1 , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , prev_output_channel=UpperCamelCase__ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , resnet_time_scale_shift=UpperCamelCase__ , )
self.up_blocks.append(UpperCamelCase__ )
A_ = output_channel
# out
if norm_type == "spatial":
A_ = SpatialNorm(block_out_channels[0] , UpperCamelCase__ )
else:
A_ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCamelCase__ , eps=1e-6 )
A_ = nn.SiLU()
A_ = nn.Convad(block_out_channels[0] , UpperCamelCase__ , 3 , padding=1 )
A_ = False
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=None ) -> Optional[Any]:
'''simple docstring'''
A_ = z
A_ = self.conv_in(UpperCamelCase__ )
A_ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ ):
def custom_forward(*UpperCamelCase__ ):
return module(*UpperCamelCase__ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
else:
# middle
A_ = self.mid_block(UpperCamelCase__ , UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = up_block(UpperCamelCase__ , UpperCamelCase__ )
# post-process
if latent_embeds is None:
A_ = self.conv_norm_out(UpperCamelCase__ )
else:
A_ = self.conv_norm_out(UpperCamelCase__ , UpperCamelCase__ )
A_ = self.conv_act(UpperCamelCase__ )
A_ = self.conv_out(UpperCamelCase__ )
return sample
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__="random" , UpperCamelCase__=False , UpperCamelCase__=True ) -> str:
'''simple docstring'''
super().__init__()
A_ = n_e
A_ = vq_embed_dim
A_ = beta
A_ = legacy
A_ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
A_ = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
A_ = self.used.shape[0]
A_ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A_ = self.re_embed
A_ = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
A_ = n_e
A_ = sane_index_shape
def snake_case_ ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = inds.shape
assert len(UpperCamelCase__ ) > 1
A_ = inds.reshape(ishape[0] , -1 )
A_ = self.used.to(UpperCamelCase__ )
A_ = (inds[:, :, None] == used[None, None, ...]).long()
A_ = match.argmax(-1 )
A_ = match.sum(2 ) < 1
if self.unknown_index == "random":
A_ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
A_ = self.unknown_index
return new.reshape(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
A_ = inds.shape
assert len(UpperCamelCase__ ) > 1
A_ = inds.reshape(ishape[0] , -1 )
A_ = self.used.to(UpperCamelCase__ )
if self.re_embed > self.used.shape[0]: # extra token
A_ = 0 # simply set to zero
A_ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCamelCase__ )
return back.reshape(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
# reshape z -> (batch, height, width, channel) and flatten
A_ = z.permute(0 , 2 , 3 , 1 ).contiguous()
A_ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A_ = torch.argmin(torch.cdist(UpperCamelCase__ , self.embedding.weight ) , dim=1 )
A_ = self.embedding(UpperCamelCase__ ).view(z.shape )
A_ = None
A_ = None
# compute loss for embedding
if not self.legacy:
A_ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A_ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
A_ = z + (z_q - z).detach()
# reshape back to match original input shape
A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
A_ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
A_ = self.remap_to_used(UpperCamelCase__ )
A_ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
A_ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A_ = indices.reshape(shape[0] , -1 ) # add batch axis
A_ = self.unmap_to_all(UpperCamelCase__ )
A_ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A_ = self.embedding(UpperCamelCase__ )
if shape is not None:
A_ = z_q.view(UpperCamelCase__ )
# reshape back to match original input shape
A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class A__ ( _snake_case ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__=False ) -> Dict:
'''simple docstring'''
A_ = parameters
A_ , A_ = torch.chunk(UpperCamelCase__ , 2 , dim=1 )
A_ = torch.clamp(self.logvar , -30.0 , 20.0 )
A_ = deterministic
A_ = torch.exp(0.5 * self.logvar )
A_ = torch.exp(self.logvar )
if self.deterministic:
A_ = A_ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def snake_case_ ( self , UpperCamelCase__ = None ) -> torch.FloatTensor:
'''simple docstring'''
# make sure sample is on the same device as the parameters and has same dtype
A_ = randn_tensor(
self.mean.shape , generator=UpperCamelCase__ , device=self.parameters.device , dtype=self.parameters.dtype )
A_ = self.mean + self.std * sample
return x
def snake_case_ ( self , UpperCamelCase__=None ) -> int:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=[1, 2, 3] ) -> Optional[Any]:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
A_ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCamelCase__ )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
return self.mean
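

# Added usage sketch (hedged): the final class above is the diagonal Gaussian
# posterior used by VAEs (DiagonalGaussianDistribution in upstream diffusers);
# `parameters` packs mean and log-variance along dim=1, so sampling is the
# reparameterization trick:
#
#   params = torch.randn(2, 8, 4, 4)             # 4 mean + 4 logvar channels
#   dist = DiagonalGaussianDistribution(params)
#   z = dist.sample()                            # mean + std * eps
#   kl = dist.kl()                               # KL(q(z|x) || N(0, I)) per item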
| 667 | 0 |
'''simple docstring'''
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)
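

# Added note: the circuit measures qubits [2, 3] as (sum, carry_out). For the
# default input (1, 1, 1) every shot yields the bit string "11", i.e.
# 1 + 1 + 1 = 0b11 = 3.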
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 715 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
# Load configuration defined in the metadata file
with open(UpperCAmelCase__ ) as metadata_file:
A_ = json.load(UpperCAmelCase__ )
A_ = LukeConfig(use_entity_aware_attention=UpperCAmelCase__, **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
A_ = torch.load(UpperCAmelCase__, map_location="""cpu""" )["""module"""]
# Load the entity vocab file
A_ = load_original_entity_vocab(UpperCAmelCase__ )
# add an entry for [MASK2]
A_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
A_ = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
A_ = AddedToken("""<ent>""", lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ )
A_ = AddedToken("""<ent2>""", lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__, """tokenizer_config.json""" ), """r""" ) as f:
A_ = json.load(UpperCAmelCase__ )
A_ = """MLukeTokenizer"""
with open(os.path.join(UpperCAmelCase__, """tokenizer_config.json""" ), """w""" ) as f:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__, MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ), """w""" ) as f:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__ )
# Initialize the embeddings of the special tokens
A_ = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
A_ = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
A_ = state_dict["""embeddings.word_embeddings.weight"""]
A_ = word_emb[ent_init_index].unsqueeze(0 )
A_ = word_emb[enta_init_index].unsqueeze(0 )
A_ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
A_ = state_dict[bias_name]
A_ = decoder_bias[ent_init_index].unsqueeze(0 )
A_ = decoder_bias[enta_init_index].unsqueeze(0 )
A_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
A_ = F'''encoder.layer.{layer_index}.attention.self.'''
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
A_ = state_dict["""entity_embeddings.entity_embeddings.weight"""]
A_ = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
A_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
A_ = state_dict["""entity_predictions.bias"""]
A_ = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
A_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
A_ = LukeForMaskedLM(config=UpperCAmelCase__ ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
A_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
A_ = state_dict[key]
else:
A_ = state_dict[key]
A_ , A_ = model.load_state_dict(UpperCAmelCase__, strict=UpperCAmelCase__ )
if set(UpperCAmelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(UpperCAmelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__, task="""entity_classification""" )
A_ = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
A_ = (0, 9)
A_ = tokenizer(UpperCAmelCase__, entity_spans=[span], return_tensors="""pt""" )
A_ = model(**UpperCAmelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
A_ = torch.Size((1, 33, 7_68) )
A_ = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCAmelCase__, atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
A_ = torch.Size((1, 1, 7_68) )
A_ = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], UpperCAmelCase__, atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__ )
A_ = """Tokyo is the capital of <mask>."""
A_ = (24, 30)
A_ = tokenizer(UpperCAmelCase__, entity_spans=[span], return_tensors="""pt""" )
A_ = model(**UpperCAmelCase__ )
A_ = encoding["""input_ids"""][0].tolist()
A_ = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
A_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(UpperCAmelCase__ )
A_ = outputs.entity_logits[0][0].argmax().item()
A_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(UpperCAmelCase__ ) )
model.save_pretrained(UpperCAmelCase__ )
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            entity_name = f"{language}:{entity_name}"
            new_mapping[entity_name] = entity_id
    return new_mapping
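

# Added note (derived from the loop above): each line of the original entity
# vocab is a JSON record like
#   {"id": 3, "entities": [["Japan", "en"], ["日本", "ja"]]}
# so the mapping keys multilingual names as "<lang>:<name>".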
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
__lowerCamelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 667 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"""facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
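

# Added usage sketch: the defaults above correspond to the "tiny" variant
# (depths [3, 3, 9, 3], hidden sizes [96, 192, 384, 768]), e.g.:
#
#   config = ConvNextV2Config()
#   config.hidden_sizes  # -> [96, 192, 384, 768]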
| 716 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding

        elif text is not None:
            return encoding

        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
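

# Added usage sketch (hedged; "laion/clap-htsat-unfused" is a public CLAP
# checkpoint): text goes to the tokenizer, raw audio to the feature extractor.
#
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=[waveform],
#                      sampling_rate=48_000, return_tensors="pt")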
| 667 | 0 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Dict:
A_ = []
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
for v in tree.values():
shapes.extend(_fetch_dims(UpperCAmelCase__ ) )
elif isinstance(UpperCAmelCase__, (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(UpperCAmelCase__ ) )
elif isinstance(UpperCAmelCase__, torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("""Not supported""" )
return shapes
@torch.jit.ignore
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Optional[int]:
A_ = []
for d in reversed(UpperCAmelCase__ ):
idx.append(flat_idx % d )
A_ = flat_idx // d
return tuple(reversed(UpperCAmelCase__ ) )
@torch.jit.ignore
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ = None, UpperCAmelCase__ = None, ) -> Tuple:
def reduce_edge_list(UpperCAmelCase__ ) -> None:
A_ = True
for i in range(len(UpperCAmelCase__ ) ):
A_ = -1 * (i + 1)
l[reversed_idx] &= tally
A_ = l[reversed_idx]
if start_edges is None:
A_ = [s == 0 for s in start]
reduce_edge_list(UpperCAmelCase__ )
if end_edges is None:
A_ = [e == (d - 1) for e, d in zip(UpperCAmelCase__, UpperCAmelCase__ )]
reduce_edge_list(UpperCAmelCase__ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(UpperCAmelCase__ ) == 0:
return [()]
elif len(UpperCAmelCase__ ) == 1:
return [(slice(start[0], end[0] + 1 ),)]
A_ = []
A_ = []
# Dimensions common to start and end can be selected directly
for s, e in zip(UpperCAmelCase__, UpperCAmelCase__ ):
if s == e:
path_list.append(slice(UpperCAmelCase__, s + 1 ) )
else:
break
A_ = tuple(UpperCAmelCase__ )
A_ = len(UpperCAmelCase__ )
# start == end, and we're done
if divergence_idx == len(UpperCAmelCase__ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
A_ = start[divergence_idx]
return tuple(
path + (slice(UpperCAmelCase__, sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :], [d - 1 for d in dims[divergence_idx + 1 :]], dims[divergence_idx + 1 :], start_edges=start_edges[divergence_idx + 1 :], end_edges=[True for _ in end_edges[divergence_idx + 1 :]], ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
A_ = end[divergence_idx]
return tuple(
path + (slice(UpperCAmelCase__, edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]], end[divergence_idx + 1 :], dims[divergence_idx + 1 :], start_edges=[True for _ in start_edges[divergence_idx + 1 :]], end_edges=end_edges[divergence_idx + 1 :], ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx], end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
A_ = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> int:
A_ = t.shape[:no_batch_dims]
A_ = list(_flat_idx_to_idx(UpperCAmelCase__, UpperCAmelCase__ ) )
# _get_minimal_slice_set is inclusive
A_ = list(_flat_idx_to_idx(flat_end - 1, UpperCAmelCase__ ) )
# Get an ordered list of slices to perform
A_ = _get_minimal_slice_set(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
A_ = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ = False, UpperCAmelCase__ = None, UpperCAmelCase__ = False, ) -> Dict:
if not (len(UpperCAmelCase__ ) > 0):
raise ValueError("""Must provide at least one input""" )
A_ = [shape[:no_batch_dims] for shape in _fetch_dims(UpperCAmelCase__ )]
A_ = tuple([max(UpperCAmelCase__ ) for s in zip(*UpperCAmelCase__ )] )
def _prep_inputs(UpperCAmelCase__ ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
A_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
A_ = t.reshape(-1, *t.shape[no_batch_dims:] )
else:
A_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
A_ = tensor_tree_map(_prep_inputs, UpperCAmelCase__ )
A_ = None
if _out is not None:
A_ = tensor_tree_map(lambda UpperCAmelCase__ : t.view([-1] + list(t.shape[no_batch_dims:] ) ), _out )
A_ = 1
for d in orig_batch_dims:
flat_batch_dim *= d
A_ = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(UpperCAmelCase__ ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
A_ = 0
A_ = prepped_outputs
for _ in range(UpperCAmelCase__ ):
# Chunk the input
if not low_mem:
A_ = _select_chunk
else:
A_ = partial(
_chunk_slice, flat_start=UpperCAmelCase__, flat_end=min(UpperCAmelCase__, i + chunk_size ), no_batch_dims=len(UpperCAmelCase__ ), )
A_ = tensor_tree_map(UpperCAmelCase__, UpperCAmelCase__ )
# Run the layer on the chunk
A_ = layer(**UpperCAmelCase__ )
# Allocate space for the output
if out is None:
A_ = tensor_tree_map(lambda UpperCAmelCase__ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ), UpperCAmelCase__ )
# Put the chunk in its pre-allocated space
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
def assign(UpperCAmelCase__, UpperCAmelCase__ ) -> None:
for k, v in da.items():
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
assign(UpperCAmelCase__, da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
A_ = da[k]
assign(UpperCAmelCase__, UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
for xa, xa in zip(UpperCAmelCase__, UpperCAmelCase__ ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
A_ = xa
elif isinstance(UpperCAmelCase__, torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
A_ = output_chunk
else:
raise ValueError("""Not supported""" )
i += chunk_size
A_ = tensor_tree_map(lambda UpperCAmelCase__ : t.view(orig_batch_dims + t.shape[1:] ), UpperCAmelCase__ )
return out
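

# Added usage sketch (hedged; `chunk_layer` is the upstream name of the
# function above): process 2 flattened batch rows at a time while keeping the
# output shape (4, 8, 16).
#
#   layer = lambda x: x * 2
#   out = chunk_layer(layer, {"x": torch.ones(4, 8, 16)},
#                     chunk_size=2, no_batch_dims=2)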
class A__ :
def __init__( self , UpperCamelCase__ = 512 , ) -> Any:
'''simple docstring'''
A_ = max_chunk_size
A_ = None
A_ = None
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
logging.info("""Tuning chunk size...""" )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
A_ = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
A_ = [c for c in candidates if c > min_chunk_size]
A_ = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(UpperCamelCase__ ) -> bool:
try:
with torch.no_grad():
fn(*UpperCamelCase__ , chunk_size=UpperCamelCase__ )
return True
except RuntimeError:
return False
A_ = 0
A_ = len(UpperCamelCase__ ) - 1
while i > min_viable_chunk_size_index:
A_ = test_chunk_size(candidates[i] )
if not viable:
A_ = (min_viable_chunk_size_index + i) // 2
else:
A_ = i
A_ = (i + len(UpperCamelCase__ ) - 1) // 2
return candidates[min_viable_chunk_size_index]
    def _compare_arg_caches( self , ac_a , ac_b ) -> bool:
        '''simple docstring'''
        consistent = True
        for aa, ab in zip(ac_a , ac_b ):
            assert type(aa ) == type(ab )
            if isinstance(aa , (list, tuple) ):
                consistent &= self._compare_arg_caches(aa , ab )
            elif isinstance(aa , dict ):
                aa_items = [v for _, v in sorted(aa.items() , key=lambda x : x[0] )]
                ab_items = [v for _, v in sorted(ab.items() , key=lambda x : x[0] )]
                consistent &= self._compare_arg_caches(aa_items , ab_items )
            else:
                consistent &= aa == ab
        return consistent
    def snake_case_ ( self , representative_fn , args , min_chunk_size , ) -> int:
        '''simple docstring'''
        A_ = True
        A_ = tree_map(lambda a : a.shape if isinstance(a , torch.Tensor ) else a , args , object )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(UpperCamelCase__ )
            A_ = self._compare_arg_caches(self.cached_arg_data , arg_data )
else:
# Otherwise, we can reuse the precomputed value
A_ = False
if not consistent:
            A_ = self._determine_favorable_chunk_size(
                representative_fn , args , min_chunk_size , )
A_ = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
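# A self-contained sketch (an addition; assumes `math` is imported for the
# `math.log` call used above) of the power-of-two candidate search performed by
# the tuner: probe doubling chunk sizes and keep the largest one whose trial passes.
def _largest_viable_power_of_two(max_chunk_size, is_viable ):
    candidates = [2**l for l in range(int(math.log(max_chunk_size, 2 ) ) + 1 )]
    viable = [c for c in candidates if is_viable(c )]
    return viable[-1] if viable else None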
| 717 |
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r'''digital_image_processing/image_data/lena_small.jpg''')
gray = cvtColor(img, COLOR_BGR2GRAY)
def UpperCAmelCase__ ( ) -> Dict:
    A_ = cn.convert_to_negative(img )
# assert negative_img array for at least one True
assert negative_img.any()
def UpperCAmelCase__ ( ) -> List[Any]:
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
# Work around assertion for response
        assert str(cc.change_contrast(img, 1_10 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def UpperCAmelCase__ ( ) -> str:
A_ = canny.gen_gaussian_kernel(9, sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def UpperCAmelCase__ ( ) -> Union[str, Any]:
A_ = imread("""digital_image_processing/image_data/lena_small.jpg""", 0 )
# assert ambiguous array for all == True
assert canny_img.all()
    A_ = canny.canny(canny_img )
# assert canny array for at least one True
assert canny_array.any()
def UpperCAmelCase__ ( ) -> Dict:
    assert gg.gaussian_filter(gray, 5, sigma=0.9 ).all()
def UpperCAmelCase__ ( ) -> int:
# laplace diagonals
A_ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    A_ = conv.img_convolve(gray, laplace ).astype(uint8 )
assert res.any()
def UpperCAmelCase__ ( ) -> List[Any]:
    assert med.median_filter(gray, 3 ).any()
def UpperCAmelCase__ ( ) -> List[Any]:
    A_ , A_ = sob.sobel_filter(gray )
assert grad.any() and theta.any()
def UpperCAmelCase__ ( ) -> List[str]:
    A_ = sp.make_sepia(img, 20 )
assert sepia.all()
def UpperCAmelCase__ ( UpperCAmelCase__ = "digital_image_processing/image_data/lena_small.jpg" ) -> List[Any]:
A_ = bs.Burkes(imread(UpperCAmelCase__, 1 ), 1_20 )
burkes.process()
assert burkes.output_img.any()
def UpperCAmelCase__ ( UpperCAmelCase__ = "digital_image_processing/image_data/lena_small.jpg", ) -> Optional[int]:
A_ = rs.NearestNeighbour(imread(UpperCAmelCase__, 1 ), 4_00, 2_00 )
nn.process()
assert nn.output.any()
def UpperCAmelCase__ ( ) -> Optional[int]:
A_ = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
A_ = imread(UpperCAmelCase__, 0 )
# Test for get_neighbors_pixel function() return not None
A_ = 0
A_ = 0
A_ = image[x_coordinate][y_coordinate]
A_ = lbp.get_neighbors_pixel(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
A_ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0, image.shape[0] ):
for j in range(0, image.shape[1] ):
            A_ = lbp.local_binary_value(image, i, j )
assert lbp_image.any()
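# A minimal sketch (an addition, independent of the package under test) of the
# local binary pattern value exercised above: threshold the eight neighbours
# against the centre pixel and pack the comparison bits into one integer.
def _lbp_value_sketch(patch ):
    center = patch[1][1]
    neighbors = [patch[0][0], patch[0][1], patch[0][2], patch[1][2],
                 patch[2][2], patch[2][1], patch[2][0], patch[1][0]]
    return sum(1 << idx for idx, p in enumerate(neighbors ) if p >= center )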
| 667 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class A__ ( unittest.TestCase ):
    def check_results_dict_not_empty( self , results ) -> None:
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
A_ = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(lowerCamelCase__ )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = "sshleifer/tiny-gpt2"
A_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , )
A_ = PyTorchBenchmark(lowerCamelCase__ )
A_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = "sgugger/tiny-distilbert-classification"
A_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , only_pretrain_model=lowerCamelCase__ , )
A_ = PyTorchBenchmark(lowerCamelCase__ )
A_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = "sshleifer/tiny-gpt2"
A_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , torchscript=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , )
A_ = PyTorchBenchmark(lowerCamelCase__ )
A_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = "sshleifer/tiny-gpt2"
A_ = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , fp16=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , )
A_ = PyTorchBenchmark(lowerCamelCase__ )
A_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = "sshleifer/tiny-gpt2"
A_ = AutoConfig.from_pretrained(lowerCamelCase__ )
# set architectures equal to `None`
A_ = None
A_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , )
A_ = PyTorchBenchmark(lowerCamelCase__ , configs=[config] )
A_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = "sshleifer/tiny-gpt2"
A_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , )
A_ = PyTorchBenchmark(lowerCamelCase__ )
A_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = "sshleifer/tiny-gpt2"
A_ = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , fp16=lowerCamelCase__ , multi_process=lowerCamelCase__ , )
A_ = PyTorchBenchmark(lowerCamelCase__ )
A_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = "sshleifer/tiny-gpt2"
A_ = AutoConfig.from_pretrained(lowerCamelCase__ )
A_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , )
A_ = PyTorchBenchmark(lowerCamelCase__ , configs=[config] )
A_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = "sshleifer/tinier_bart"
A_ = AutoConfig.from_pretrained(lowerCamelCase__ )
A_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , )
A_ = PyTorchBenchmark(lowerCamelCase__ , configs=[config] )
A_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = "sshleifer/tiny-gpt2"
A_ = AutoConfig.from_pretrained(lowerCamelCase__ )
A_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , )
A_ = PyTorchBenchmark(lowerCamelCase__ , configs=[config] )
A_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = "sshleifer/tinier_bart"
A_ = AutoConfig.from_pretrained(lowerCamelCase__ )
A_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , )
A_ = PyTorchBenchmark(lowerCamelCase__ , configs=[config] )
A_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
A_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , save_to_csv=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowerCamelCase__ , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(lowerCamelCase__ , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(lowerCamelCase__ , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(lowerCamelCase__ , """train_time.csv""" ) , env_info_csv_file=os.path.join(lowerCamelCase__ , """env.csv""" ) , multi_process=lowerCamelCase__ , )
A_ = PyTorchBenchmark(lowerCamelCase__ )
benchmark.run()
self.assertTrue(Path(os.path.join(lowerCamelCase__ , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCamelCase__ , """train_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCamelCase__ , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCamelCase__ , """train_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCamelCase__ , """env.csv""" ) ).exists() )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = "sshleifer/tiny-gpt2"
        def _check_summary_is_not_empty(summary ):
            self.assertTrue(hasattr(summary , """sequential""" ) )
            self.assertTrue(hasattr(summary , """cumulative""" ) )
            self.assertTrue(hasattr(summary , """current""" ) )
            self.assertTrue(hasattr(summary , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
A_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowerCamelCase__ , """log.txt""" ) , log_print=lowerCamelCase__ , trace_memory_line_by_line=lowerCamelCase__ , multi_process=lowerCamelCase__ , )
A_ = PyTorchBenchmark(lowerCamelCase__ )
A_ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(lowerCamelCase__ , """log.txt""" ) ).exists() )
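# Hypothetical standalone usage (an added sketch, not part of the test suite;
# the model name and sizes mirror the fixtures above, and torch must be available):
if __name__ == "__main__":
    A_ = PyTorchBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
    print(PyTorchBenchmark(A_ ).run() )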
| 718 |
'''simple docstring'''
def manhattan_distance(point_a, point_b ) -> float:
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError("""Both points must be in the same n-dimensional space""" )
    return float(sum(abs(a - b ) for a, b in zip(point_a, point_b ) ) )
def _validate_point(point ) -> None:
    if point:
        if isinstance(point, list ):
            for item in point:
                if not isinstance(item, (int, float) ):
                    msg = (
                        """Expected a list of numbers as input, found """
                        F'''{type(item ).__name__}'''
                    )
                    raise TypeError(msg )
        else:
            msg = F'''Expected a list of numbers as input, found {type(point ).__name__}'''
            raise TypeError(msg )
    else:
        raise ValueError("""Missing an input""" )
def manhattan_distance_one_liner(point_a, point_b ) -> float:
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError("""Both points must be in the same n-dimensional space""" )
    return float(sum(abs(x - y ) for x, y in zip(point_a, point_b ) ) )
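# Quick examples (an added note, using the two functions above):
#   manhattan_distance([1, 1], [2, 2])            -> 2.0
#   manhattan_distance_one_liner([1, 5], [3, 2])  -> 5.0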
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
'''simple docstring'''
def solution(n = 10_00 ) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1 ) )
if __name__ == "__main__":
print(solution())
| 719 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class A__ ( BeitImageProcessor ):
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> None:
'''simple docstring'''
        warnings.warn(
            """The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use BeitImageProcessor instead.""" , FutureWarning , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
| 667 | 0 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class A__ ( unittest.TestCase ):
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit , weight , max_weight ) , 210 )
def snake_case_ ( self ) -> str:
'''simple docstring'''
self.assertRaisesRegex(__a , """max_weight must greater than zero.""" )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
self.assertRaisesRegex(__a , """Weight can not be negative.""" )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
self.assertRaisesRegex(__a , """Profit can not be negative.""" )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
self.assertRaisesRegex(__a , """max_weight must greater than zero.""" )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
        self.assertRaisesRegex(
            IndexError , """The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
| 720 |
'''simple docstring'''
def UpperCAmelCase__ ( num ) -> bool:
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
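# A one-line cross-check (an added sketch, equivalent for integer inputs):
def _is_palindrome_str(num ) -> bool:
    return num >= 0 and str(num ) == str(num )[::-1]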
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class A__ ( unittest.TestCase ):
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
A_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
A_ = """xvjiarui/stable-diffusion-2-inpainting"""
A_ , A_ = FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCAmelCase__ , safety_checker=UpperCAmelCase__ )
A_ = """Face of a yellow cat, high resolution, sitting on a park bench"""
A_ = jax.random.PRNGKey(0 )
A_ = 50
A_ = jax.device_count()
A_ = num_samples * [prompt]
A_ = num_samples * [init_image]
A_ = num_samples * [mask_image]
        A_ , A_ , A_ = pipeline.prepare_inputs(prompt , init_image , mask_image )
# shard inputs and rng
A_ = replicate(UpperCAmelCase__ )
A_ = jax.random.split(UpperCAmelCase__ , jax.device_count() )
A_ = shard(UpperCAmelCase__ )
A_ = shard(UpperCAmelCase__ )
A_ = shard(UpperCAmelCase__ )
A_ = pipeline(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , jit=UpperCAmelCase__ )
A_ = output.images.reshape(UpperCAmelCase__ , 512 , 512 , 3 )
A_ = images[0, 253:256, 253:256, -1]
A_ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
A_ = jnp.array(
[0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
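    # Sketch of the pmap-style data layout used above (an added note, not original
    # code): `replicate` copies a pytree onto every local device, while `shard`
    # splits the leading batch axis across them, e.g. an array of shape
    # (jax.device_count() * 2, 4) becomes (jax.device_count(), 2, 4) after `shard`.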
| 721 |
'''simple docstring'''
__lowerCamelCase = range(2, 20 + 1)
__lowerCamelCase = [10**k for k in range(ks[-1] + 1)]
__lowerCamelCase = {}
def next_term(a_i, k, i, n ):
    A_ = sum(a_i[j] for j in range(k, len(a_i ) ) )
    A_ = sum(a_i[j] * base[j] for j in range(min(len(a_i ), k ) ) )
A_ , A_ = 0, 0
A_ = n - i
    A_ = memo.get(ds_b )
    if sub_memo is not None:
        A_ = sub_memo.get(c )
        if jumps is not None and len(jumps ) > 0:
# find and make the largest jump without going over
A_ = -1
        for _k in range(len(jumps ) - 1, -1, -1 ):
            if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                A_ = _k
                break
        if max_jump >= 0:
            A_ , A_ , A_ = jumps[max_jump]
            # since the difference between jumps is cached, add c
            A_ = diff + c
            for j in range(min(k, len(a_i ) ) ):
                A_ , A_ = divmod(new_c, 10 )
            if new_c > 0:
                add(a_i, k, new_c )
else:
A_ = []
else:
A_ = {c: []}
A_ = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
            A_ , A_ = next_term(a_i, k - 1, i + dn, n )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
        A_ , A_ = compute(a_i, k, i + dn, n )
diff += _diff
dn += terms_jumped
A_ = sub_memo[c]
# keep jumps sorted by # of terms skipped
A_ = 0
    while j < len(jumps ):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k) )
return (diff, dn)
def compute(a_i, k, i, n ):
if i >= n:
return 0, i
    if k > len(a_i ):
        a_i.extend([0 for _ in range(k - len(a_i ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
A_ = i
A_ , A_ , A_ = 0, 0, 0
    for j in range(len(a_i ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
A_ = ds_c + ds_b
diff += addend
A_ = 0
        for j in range(k ):
A_ = a_i[j] + addend
A_ , A_ = divmod(UpperCAmelCase__, 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
        add(a_i, k, addend )
return diff, i - start_i
def add(digits, k, addend ) -> None:
    for j in range(k, len(digits ) ):
        A_ = digits[j] + addend
        if s >= 10:
            A_ , A_ = divmod(s, 10 )
A_ = addend // 10 + quotient
else:
A_ = s
A_ = addend // 10
if addend == 0:
break
while addend > 0:
        addend, digit = divmod(addend, 10 )
        digits.append(digit )
def solution(n = 10**15 ) -> int:
A_ = [1]
A_ = 1
A_ = 0
while True:
        A_ , A_ = next_term(digits, 20, i + dn, n )
dn += terms_jumped
if dn == n - i:
break
A_ = 0
    for j in range(len(digits ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class A__ ( PretrainedConfig ):
snake_case = "xlm-roberta-xl"
    def __init__( self , vocab_size=250880 , hidden_size=2560 , num_hidden_layers=36 , num_attention_heads=32 , intermediate_size=10240 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , initializer_range=0.02 , layer_norm_eps=1e-0_5 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> None:
'''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = hidden_act
A_ = intermediate_size
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = type_vocab_size
A_ = initializer_range
A_ = layer_norm_eps
A_ = position_embedding_type
A_ = use_cache
A_ = classifier_dropout
class A__ ( OnnxConfig ):
@property
def snake_case_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
A_ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 700 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class A__ ( tf.keras.layers.Layer ):
    def __init__( self , vocab_size , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False , **kwargs ) -> Union[str, Any]:
        '''simple docstring'''
        super().__init__(**kwargs )
A_ = vocab_size
A_ = d_embed
A_ = d_proj
A_ = cutoffs + [vocab_size]
A_ = [0] + self.cutoffs
A_ = div_val
A_ = self.cutoffs[0]
A_ = len(self.cutoffs ) - 1
A_ = self.shortlist_size + self.n_clusters
A_ = keep_order
A_ = []
A_ = []
    def build( self , input_shape ) -> None:
'''simple docstring'''
if self.n_clusters > 0:
A_ = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=UpperCamelCase__ , name="""cluster_weight""" )
A_ = self.add_weight(
shape=(self.n_clusters,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name="""cluster_bias""" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
A_ = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_projs_._{i}''' , )
self.out_projs.append(UpperCamelCase__ )
else:
self.out_projs.append(UpperCamelCase__ )
A_ = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._weight''' , )
A_ = self.add_weight(
shape=(self.vocab_size,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
A_ , A_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
A_ = self.d_embed // (self.div_val**i)
A_ = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_projs_._{i}''' )
self.out_projs.append(UpperCamelCase__ )
A_ = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._weight''' , )
A_ = self.add_weight(
shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
        super().build(input_shape )
@staticmethod
    def _logit( x , W , b , proj=None ) -> List[Any]:
        '''simple docstring'''
        A_ = x
        if proj is not None:
            A_ = tf.einsum("""ibd,ed->ibe""" , y , proj )
        return tf.einsum("""ibd,nd->ibn""" , y , W ) + b
@staticmethod
    def _gather_logprob( logprob , target ) -> List[str]:
        '''simple docstring'''
        A_ = shape_list(logprob )
        A_ = tf.range(lp_size[0] , dtype=target.dtype )
        A_ = tf.stack([r, target] , 1 )
        return tf.gather_nd(logprob , idx )
    def call( self , hidden , target , return_mean=True , training=False ) -> Optional[int]:
'''simple docstring'''
A_ = 0
if self.n_clusters == 0:
            A_ = self._logit(hidden , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
            if target is not None:
                A_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target , logits=out )
            A_ = tf.nn.log_softmax(out , axis=-1 )
else:
            A_ = shape_list(hidden )
A_ = []
A_ = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
A_ , A_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    A_ = tf.where(mask )
                    A_ = tf.boolean_mask(target , mask ) - l_idx
if self.div_val == 1:
A_ = self.out_layers[0][0][l_idx:r_idx]
A_ = self.out_layers[0][1][l_idx:r_idx]
else:
A_ = self.out_layers[i][0]
A_ = self.out_layers[i][1]
if i == 0:
A_ = tf.concat([cur_W, self.cluster_weight] , 0 )
A_ = tf.concat([cur_b, self.cluster_bias] , 0 )
                    A_ = self._logit(hidden , cur_W , cur_b , self.out_projs[0] )
                    A_ = tf.nn.log_softmax(head_logit )
                    out.append(head_logprob[..., : self.cutoffs[0]] )
                    if target is not None:
                        A_ = tf.boolean_mask(head_logprob , mask )
                        A_ = self._gather_logprob(cur_head_logprob , cur_target )
                else:
                    A_ = self._logit(hidden , cur_W , cur_b , self.out_projs[i] )
                    A_ = tf.nn.log_softmax(tail_logit )
                    A_ = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    A_ = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i )
if target is not None:
                        A_ = tf.boolean_mask(head_logprob , mask )
                        A_ = tf.boolean_mask(tail_logprob , mask )
                        A_ = self._gather_logprob(cur_tail_logprob , cur_target )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
                    loss += tf.scatter_nd(mask_idx , -cur_logprob , shape_list(loss ) )
        A_ = tf.concat(out , axis=-1 )
        if target is not None:
            if return_mean:
                A_ = tf.reduce_mean(loss )
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss )
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss , name=self.name , aggregation="""mean""" if return_mean else """""" )
return out
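# A minimal sketch (an added helper, not part of the original layer) of the
# cutoff bucketing used above: map a token id to the cluster whose
# [l_idx, r_idx) range contains it.
def _cluster_of_sketch(token_id, cutoffs ):
    ends = [0] + list(cutoffs )
    for i in range(len(ends ) - 1 ):
        if ends[i] <= token_id < ends[i + 1]:
            return i
    raise ValueError("""token id out of range""" )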
| 667 | 0 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
__lowerCamelCase = '''facebook/wmt19-en-de'''
__lowerCamelCase = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
__lowerCamelCase = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
__lowerCamelCase = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
__lowerCamelCase = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
__lowerCamelCase = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
__lowerCamelCase = '''tiny-wmt19-en-de'''
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-de
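# Hypothetical round-trip check (an added sketch; assumes the save above succeeded):
#
#   reloaded = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
#   assert reloaded.num_parameters() == tiny_model.num_parameters()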
| 701 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation( graph, v, visited_forward, visited_backward, cst_fwd, cst_bwd, queue, parent, shortest_distance, ) -> float | int:
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
        A_ = cst_fwd.get(nxt, np.inf )
A_ = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
A_ = new_cost_f
A_ = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
A_ = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def bidirectional_dij( source, destination, graph_forward, graph_backward ) -> int:
A_ = -1
A_ = set()
A_ = set()
A_ = {source: 0}
A_ = {destination: 0}
A_ = {source: None}
A_ = {destination: None}
A_ = PriorityQueue()
A_ = PriorityQueue()
A_ = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
A_ , A_ = queue_forward.get()
        visited_forward.add(v_fwd )
A_ , A_ = queue_backward.get()
        visited_backward.add(v_bwd )
        A_ = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward, cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance, )
        A_ = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward, cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance, )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
A_ = shortest_distance
return shortest_path_distance
__lowerCamelCase = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
__lowerCamelCase = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
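# Example (an added note, using the two graphs defined above): the shortest
# E -> F distance returned by bidirectional_dij is 3, via E -> G -> F
# (the alternative E -> B -> C -> D -> F costs 4).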
| 667 | 0 |
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def UpperCAmelCase__ ( iterations ) -> None:
# A local function to see if a dot lands in the circle.
    def is_in_circle(x, y ) -> bool:
A_ = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
A_ = mean(
int(is_in_circle(uniform(-1.0, 1.0 ), uniform(-1.0, 1.0 ) ) )
        for _ in range(iterations ) )
# The ratio of the area for circle to square is pi/4.
A_ = proportion * 4
print(F'''The estimated value of pi is {pi_estimate}''' )
print(F'''The numpy value of pi is {pi}''' )
print(F'''The total error is {abs(pi - pi_estimate )}''' )
def area_under_curve_estimator( iterations, function_to_integrate, min_value = 0.0, max_value = 1.0, ) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value ) ) for _ in range(iterations ) ) * (max_value - min_value)
def UpperCAmelCase__ ( iterations, min_value = 0.0, max_value = 1.0 ) -> None:
    def identity_function(x ) -> float:
        return x
    A_ = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value )
A_ = (max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(F'''Estimating area under y=x where x varies from {min_value} to {max_value}''' )
print(F'''Estimated value is {estimated_value}''' )
print(F'''Expected value is {expected_value}''' )
print(F'''Total error is {abs(estimated_value - expected_value )}''' )
print("""******************""" )
def UpperCAmelCase__ ( iterations ) -> None:
    def function_to_integrate(x ) -> float:
        return sqrt(4.0 - x * x )
    A_ = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(F'''Estimated value is {estimated_value}''' )
print(F'''Expected value is {pi}''' )
print(F'''Total error is {abs(estimated_value - pi )}''' )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
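# Quick check (an added note; the estimate is stochastic):
#
#   area_under_curve_estimator(100_000, lambda x: x * x)  # ~ 1/3 on [0, 1]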
| 702 |
'''simple docstring'''
import os
__lowerCamelCase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
def parse_roman_numerals(numerals ) -> int:
A_ = 0
A_ = 0
    while index < len(numerals ) - 1:
A_ = SYMBOLS[numerals[index]]
A_ = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def generate_roman_numerals(num ) -> str:
A_ = """"""
A_ = num // 10_00
numerals += m_count * "M"
num %= 10_00
A_ = num // 1_00
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
A_ = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def UpperCAmelCase__ ( UpperCAmelCase__ = "/p089_roman.txt" ) -> int:
A_ = 0
    with open(os.path.dirname(__file__ ) + roman_numerals_filename ) as filea:
A_ = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original )
        shortened = generate_roman_numerals(num )
        savings += len(original ) - len(shortened )
return savings
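# Example (an added note, using the helpers above): parse_roman_numerals("XIIII")
# returns 14 and generate_roman_numerals(14) returns "XIV", a saving of two
# characters for that line.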
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
from __future__ import annotations
def make_matrix(row_size = 4 ) -> list[list[int]]:
    row_size = abs(row_size ) or 4
    return [[1 + x + y * row_size for x in range(row_size )] for y in range(row_size )]
def rotate_90(matrix ) -> list[list[int]]:
    return reverse_row(transpose(matrix ) )
    # OR.. transpose(reverse_column(matrix))
def rotate_180(matrix ) -> list[list[int]]:
    return reverse_row(reverse_column(matrix ) )
    # OR.. reverse_column(reverse_row(matrix))
def rotate_270(matrix ) -> list[list[int]]:
    return reverse_column(transpose(matrix ) )
    # OR.. transpose(reverse_row(matrix))
def transpose(matrix ) -> list[list[int]]:
    matrix = [list(x ) for x in zip(*matrix )]
    return matrix
def reverse_row(matrix ) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix
def reverse_column(matrix ) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix
def print_matrix(matrix ) -> None:
    for i in matrix:
        print(*i )
if __name__ == "__main__":
    matrix = make_matrix()
    print('''\norigin:\n''')
    print_matrix(matrix)
    print('''\nrotate 90 counterclockwise:\n''')
    print_matrix(rotate_90(matrix))
    matrix = make_matrix()
    print('''\norigin:\n''')
    print_matrix(matrix)
    print('''\nrotate 180:\n''')
    print_matrix(rotate_180(matrix))
    matrix = make_matrix()
    print('''\norigin:\n''')
    print_matrix(matrix)
    print('''\nrotate 270 counterclockwise:\n''')
    print_matrix(rotate_270(matrix))
| 703 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
| 667 | 0 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
__lowerCamelCase = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
__lowerCamelCase = 10
__lowerCamelCase = 256
def get_min_hash(tokens ):
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
def get_tokens(code ):
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class DuplicationIndex:
    def __init__( self , *,
    duplication_jaccard_threshold = 0.85 , ) -> None:
'''simple docstring'''
A_ = duplication_jaccard_threshold
A_ = NUM_PERM
A_ = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        A_ = defaultdict(set )
    def add( self , code_key , min_hash ) -> None:
        '''simple docstring'''
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(f'''Duplicate key {code_key}''' )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def get_duplicate_clusters( self ) -> List[List[Dict]]:
        '''simple docstring'''
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
    def save( self , filepath ) -> None:
        '''simple docstring'''
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , """w""" ) as f:
            json.dump(duplicate_clusters , f )
def _compute_min_hash(element ):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator ):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=1_00_00 ), chunksize=1_00, ):
if data is not None:
yield data
def make_duplicate_clusters(dataset_iterator, jaccard_threshold ):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ), max_queue_size=1_00 ) ):
        di.add(filename, min_hash )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def jaccard_similarity(code_a, code_b ) -> float:
    tokensa = get_tokens(code_a )
    tokensb = get_tokens(code_b )
    return len(tokensa & tokensb ) / len(tokensa | tokensb )
__lowerCamelCase = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold ):
    extremes = []
    for elementa in cluster:
        code_a = _shared_dataset[elementa["""base_index"""]]["""content"""]
        for elementb in extremes:
            code_b = _shared_dataset[elementb["""base_index"""]]["""content"""]
            if jaccard_similarity(code_a, code_b ) >= jaccard_threshold:
                elementb["copies"] += 1
                break
        else:
            elementa["copies"] = 1
            extremes.append(elementa )
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold ):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f, cluster_list, ), total=len(cluster_list ), ):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset(dataset, jaccard_threshold = 0.85 ):
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold )
    duplicate_indices = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["""base_index"""]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x, idx : idx not in remove_indices, with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["""is_extreme"""] = element["""base_index"""] in extreme_dict
            if element["is_extreme"]:
                element["""copies"""] = extreme_dict[element["""base_index"""]]["""copies"""]
    print(F'''Original dataset size: {len(dataset )}''' )
    print(F'''Number of duplicate clusters: {len(duplicate_clusters )}''' )
    print(F'''Files in duplicate cluster: {len(duplicate_indices )}''' )
    print(F'''Unique files in duplicate cluster: {len(extreme_dict )}''' )
    print(F'''Filtered dataset size: {len(ds_filter )}''' )
    return ds_filter, duplicate_clusters
| 704 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_ ):
    return EnvironmentCommand()
def download_command_factory(args ):
    return EnvironmentCommand(args.accelerate_config_file )
class EnvironmentCommand( BaseTransformersCLICommand ):
@staticmethod
    def snake_case_ ( parser ):
'''simple docstring'''
A_ = parser.add_parser("""env""" )
        download_parser.set_defaults(func=info_command_factory )
        download_parser.add_argument(
            """--accelerate-config_file""" , default=None , help="""The accelerate config file to use for the default values in the launching script.""" , )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self , accelerate_config_file , *args ) -> None:
'''simple docstring'''
A_ = accelerate_config_file
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = """not installed"""
if is_safetensors_available():
import safetensors
A_ = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
A_ = f'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
A_ = """not installed"""
A_ = A_ = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
A_ = accelerate.__version__
# Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file ):
A_ = load_config_from_file(self._accelerate_config_file ).to_dict()
A_ = (
"""\n""".join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(UpperCamelCase__ , UpperCamelCase__ )
else f'''\t{accelerate_config}'''
)
A_ = """not installed"""
A_ = """NA"""
if is_torch_available():
import torch
A_ = torch.__version__
A_ = torch.cuda.is_available()
A_ = """not installed"""
A_ = """NA"""
if is_tf_available():
import tensorflow as tf
A_ = tf.__version__
try:
# deprecated in v2.1
A_ = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
A_ = bool(tf.config.list_physical_devices("""GPU""" ) )
A_ = """not installed"""
A_ = """not installed"""
A_ = """not installed"""
A_ = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
A_ = flax.__version__
A_ = jax.__version__
A_ = jaxlib.__version__
A_ = jax.lib.xla_bridge.get_backend().platform
A_ = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": f'''{safetensors_version}''',
"""Accelerate version""": f'''{accelerate_version}''',
"""Accelerate config""": f'''{accelerate_config_str}''',
"""PyTorch version (GPU?)""": f'''{pt_version} ({pt_cuda_available})''',
"""Tensorflow version (GPU?)""": f'''{tf_version} ({tf_cuda_available})''',
"""Flax version (CPU?/GPU?/TPU?)""": f'''{flax_version} ({jax_backend})''',
"""Jax version""": f'''{jax_version}''',
"""JaxLib version""": f'''{jaxlib_version}''',
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(UpperCamelCase__ ) )
return info
@staticmethod
def snake_case_ ( UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 667 | 0 |
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = TypeVar('''DatasetType''', Dataset, IterableDataset)
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ = None, UpperCAmelCase__ = None, UpperCAmelCase__ = None, UpperCAmelCase__ = None, UpperCAmelCase__ = "first_exhausted", ) -> Optional[int]:
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("""Unable to interleave an empty list of datasets.""" )
    for i, dataset in enumerate(datasets ):
        if not isinstance(dataset, (Dataset, IterableDataset) ):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
                        """is an empty dataset dictionary.""" )
                raise ValueError(
                    F'''Dataset at position {i} has at least one split: {list(dataset )}\n'''
                    F'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(dataset ) )}\']''' )
            raise ValueError(
                F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}.''' )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type ):
            raise ValueError(
                F'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F'''{stopping_strategy} is not supported. Please enter a valid stopping_strategy.''' )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy )
def concatenate_datasets( dsets, info = None, split = None, axis = 0, ) -> DatasetType:
if not dsets:
raise ValueError("""Unable to concatenate an empty list of datasets.""" )
    for i, dataset in enumerate(dsets ):
        if not isinstance(dataset, (Dataset, IterableDataset) ):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
                        """is an empty dataset dictionary.""" )
                raise ValueError(
                    F'''Dataset at position {i} has at least one split: {list(dataset )}\n'''
                    F'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(dataset ) )}\']''' )
            raise ValueError(
                F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}.''' )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type ):
            raise ValueError(
                F'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis )
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis )
| 705 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( _snake_case , unittest.TestCase ):
lowercase = KandinskyVaaPriorPipeline
lowercase = ["prompt"]
lowercase = ["prompt", "negative_prompt"]
lowercase = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
lowercase = False
@property
def snake_case_ ( self ) -> Any:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
return self.time_input_dim
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
return 100
@property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
A_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(UpperCamelCase__ )
@property
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
A_ = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 12,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
A_ = PriorTransformer(**UpperCamelCase__ )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
A_ = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
A_ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
A_ = CLIPVisionModelWithProjection(UpperCamelCase__ )
return model
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = CLIPImageProcessor(
crop_size=224 , do_center_crop=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_resize=UpperCamelCase__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.dummy_prior
A_ = self.dummy_image_encoder
A_ = self.dummy_text_encoder
A_ = self.dummy_tokenizer
A_ = self.dummy_image_processor
A_ = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=UpperCamelCase__ , clip_sample_range=10.0 , )
A_ = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
    def snake_case_ ( self , device , seed=0 ) -> Optional[int]:
        '''simple docstring'''
        if str(device ).startswith("""mps""" ):
            A_ = torch.manual_seed(seed )
        else:
            A_ = torch.Generator(device=device ).manual_seed(seed )
A_ = {
"""prompt""": """horse""",
"""generator""": generator,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = """cpu"""
A_ = self.get_dummy_components()
A_ = self.pipeline_class(**UpperCamelCase__ )
A_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
A_ = output.image_embeds
A_ = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0]
A_ = image[0, -10:]
A_ = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
A_ = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = torch_device == """cpu"""
A_ = True
A_ = False
self._test_inference_batch_single_identical(
test_max_difference=UpperCamelCase__ , relax_max_difference=UpperCamelCase__ , test_mean_pixel_difference=UpperCamelCase__ , )
@skip_mps
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = torch_device == """cpu"""
A_ = False
self._test_attention_slicing_forward_pass(
test_max_difference=UpperCamelCase__ , test_mean_pixel_difference=UpperCamelCase__ , )
| 667 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class A__ ( _snake_case ):
lowercase = "speech_to_text"
lowercase = ["past_key_values"]
lowercase = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , UpperCamelCase__=10000 , UpperCamelCase__=12 , UpperCamelCase__=2048 , UpperCamelCase__=4 , UpperCamelCase__=6 , UpperCamelCase__=2048 , UpperCamelCase__=4 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__="relu" , UpperCamelCase__=256 , UpperCamelCase__=0.1 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.02 , UpperCamelCase__=2 , UpperCamelCase__=True , UpperCamelCase__=1 , UpperCamelCase__=0 , UpperCamelCase__=2 , UpperCamelCase__=6000 , UpperCamelCase__=1024 , UpperCamelCase__=2 , UpperCamelCase__=(5, 5) , UpperCamelCase__=1024 , UpperCamelCase__=80 , UpperCamelCase__=1 , **UpperCamelCase__ , ) -> List[Any]:
'''simple docstring'''
A_ = vocab_size
A_ = d_model
A_ = encoder_ffn_dim
A_ = encoder_layers
A_ = encoder_attention_heads
A_ = decoder_ffn_dim
A_ = decoder_layers
A_ = decoder_attention_heads
A_ = dropout
A_ = attention_dropout
A_ = activation_dropout
A_ = activation_function
A_ = init_std
A_ = encoder_layerdrop
A_ = decoder_layerdrop
A_ = use_cache
A_ = encoder_layers
A_ = scale_embedding # scale factor will be sqrt(d_model) if True
A_ = max_source_positions
A_ = max_target_positions
A_ = num_conv_layers
A_ = list(UpperCamelCase__ )
A_ = conv_channels
A_ = input_feat_per_channel
A_ = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
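# Usage sketch (added; not in the original file). The class above mirrors
# transformers' Speech2TextConfig, whose constructor rejects mismatched
# convolutional settings. Assuming the real `transformers` package:
from transformers import Speech2TextConfig

config = Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5, 5))
assert len(config.conv_kernel_sizes) == config.num_conv_layers
try:
    Speech2TextConfig(num_conv_layers=3, conv_kernel_sizes=(5, 5))
except ValueError as err:  # mismatched lengths fail at construction time
    print(err)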
| 706 |
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( _snake_case ):
lowercase = (IPNDMScheduler,)
lowercase = (("num_inference_steps", 50),)
def snake_case_ ( self , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = {"""num_train_timesteps""": 1000}
config.update(**UpperCamelCase__ )
return config
def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config(**UpperCamelCase__ )
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
A_ = scheduler_class.from_pretrained(UpperCamelCase__ )
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
A_ = scheduler_class.from_pretrained(UpperCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(**UpperCamelCase__ )
A_ = scheduler_class(**UpperCamelCase__ )
A_ = 10
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
return sample
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase__ , """set_timesteps""" ):
scheduler.set_timesteps(UpperCamelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , """set_timesteps""" ):
A_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A_ = dummy_past_residuals[:]
A_ = scheduler.timesteps[5]
A_ = scheduler.timesteps[6]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = self.full_loop()
A_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 2540529 ) < 10
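# Round-trip sketch (added; assumes the real `diffusers` package): the tests
# above check that a scheduler restored via save_config/from_pretrained steps
# identically to the original. The same pattern in miniature:
import tempfile
import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
with tempfile.TemporaryDirectory() as tmpdirname:
    scheduler.save_config(tmpdirname)
    reloaded = IPNDMScheduler.from_pretrained(tmpdirname)
reloaded.set_timesteps(10)
assert torch.equal(scheduler.timesteps, reloaded.timesteps)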
| 667 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
__lowerCamelCase = None
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCamelCase = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
},
}
__lowerCamelCase = {
'''camembert-base''': 512,
}
__lowerCamelCase = '''▁'''
class A__ ( _UpperCamelCase ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ["input_ids", "attention_mask"]
lowercase = CamembertTokenizer
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__="</s>" , UpperCamelCase__="<s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<mask>" , UpperCamelCase__=["<s>NOTUSED", "</s>NOTUSED"] , **UpperCamelCase__ , ) -> str:
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it
A_ = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
super().__init__(
__a , tokenizer_file=__a , bos_token=__a , eos_token=__a , sep_token=__a , cls_token=__a , unk_token=__a , pad_token=__a , mask_token=__a , additional_special_tokens=__a , **__a , )
A_ = vocab_file
A_ = False if not self.vocab_file else True
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ = [self.cls_token_id]
A_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
'''simple docstring'''
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A_ = os.path.join(
__a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ):
copyfile(self.vocab_file , __a )
return (out_vocab_file,)
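# Illustration (added): the two helpers above implement CamemBERT's special
# token layout - <s> A </s> for one sequence, <s> A </s></s> B </s> for a
# pair. With placeholder ids cls=0 and sep=2:
def _layout(cls_id, sep_id, ids_a, ids_b=None):
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]

assert _layout(0, 2, [10, 11]) == [0, 10, 11, 2]
assert _layout(0, 2, [10, 11], [12, 13]) == [0, 10, 11, 2, 2, 12, 13, 2]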
| 707 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCamelCase = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
A_ = list(s_dict.keys() )
for key in keys:
A_ = r""".*/layers_(\d+)"""
A_ = key
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.sub(r"""layers_(\d+)""", r"""block/\1/layer""", UpperCAmelCase__ )
A_ = r"""(encoder|decoder)\/"""
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.match(UpperCAmelCase__, UpperCAmelCase__ ).groups()
if groups[0] == "encoder":
A_ = re.sub(r"""/mlp/""", r"""/1/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/1/layer_norm/""", UpperCAmelCase__ )
elif groups[0] == "decoder":
A_ = re.sub(r"""/mlp/""", r"""/2/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/2/layer_norm/""", UpperCAmelCase__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
A_ = new_key.replace(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''{key} -> {new_key}''' )
A_ = s_dict.pop(UpperCAmelCase__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
A_ = s_dict[key].shape[0]
A_ = s_dict[key]
for idx in range(UpperCAmelCase__ ):
                A_ = expert_weights[idx]
            print(F'''{key} -> {key.replace("expert/", F"experts/expert_{idx}/" )}''' )
s_dict.pop(UpperCAmelCase__ )
return s_dict
__lowerCamelCase = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
    # Convert a Google-style config to the Hugging Face format
import regex as re
with open(UpperCAmelCase__, """r""" ) as f:
A_ = f.read()
A_ = re.findall(r"""(.*) = ([0-9.]*)""", UpperCAmelCase__ )
A_ = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
A_ = float(UpperCAmelCase__ ) if """.""" in value else int(UpperCAmelCase__ )
A_ = re.findall(r"""(.*activations) = \(\'(.*)\',\)""", UpperCAmelCase__ )[0]
A_ = str(activation[1] )
A_ = num_experts
A_ = SwitchTransformersConfig(**UpperCAmelCase__ )
return config
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__=None, UpperCAmelCase__="./", UpperCAmelCase__=8 ) -> List[str]:
# Initialise PyTorch model
print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
A_ = checkpoints.load_tax_checkpoint(UpperCAmelCase__ )
if gin_file is not None:
A_ = convert_gin_to_config(UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ = SwitchTransformersConfig.from_pretrained(UpperCAmelCase__ )
A_ = SwitchTransformersForConditionalGeneration(UpperCAmelCase__ )
A_ = flax_params["""target"""]
A_ = flatten_dict(UpperCAmelCase__, sep="""/""" )
A_ = rename_keys(UpperCAmelCase__ )
A_ = unflatten_dict(UpperCAmelCase__, sep="""/""" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
        '''Path to the T5X checkpoint of the pre-trained SwitchTransformers model whose weights will be'''
        ''' converted to PyTorch.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCamelCase = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
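# Toy demonstration (added) of the key-renaming step in the rename function
# above: T5X keys such as "encoder/layers_<n>/..." are rewritten to the HF
# "block/<n>/layer" layout before the MOE_LAYER_NAME_MAPPING substitutions.
import re

key = "encoder/layers_3/attention/query/kernel"
assert re.sub(r"layers_(\d+)", r"block/\1/layer", key) == (
    "encoder/block/3/layer/attention/query/kernel"
)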
| 667 | 0 |
'''simple docstring'''
from manim import *
class A__ ( UpperCAmelCase_ ):
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = Rectangle(height=0.5 , width=0.5 )
A_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
A_ = [mem.copy() for i in range(6 )]
A_ = [mem.copy() for i in range(6 )]
A_ = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
A_ = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
A_ = VGroup(_snake_case , _snake_case ).arrange(_snake_case , buff=0 )
A_ = Text("""CPU""" , font_size=24 )
A_ = Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_snake_case )
A_ = [mem.copy() for i in range(1 )]
A_ = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
A_ = Text("""GPU""" , font_size=24 )
A_ = Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case )
gpu.align_to(_snake_case , _snake_case )
gpu.set_x(gpu.get_x() - 1 )
self.add(_snake_case )
A_ = [mem.copy() for i in range(6 )]
A_ = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
A_ = Text("""Model""" , font_size=24 )
A_ = Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case )
model.move_to([3, -1.0, 0] )
self.play(
Create(_snake_case , run_time=1 ) , Create(_snake_case , run_time=1 ) , Create(_snake_case , run_time=1 ) , )
A_ = MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
A_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A_ = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_snake_case , run_time=2.5 ) , Write(_snake_case ) , Write(_snake_case ) )
self.add(_snake_case )
A_ = []
A_ = []
A_ = []
for i, rect in enumerate(_snake_case ):
A_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_snake_case , opacity=0.7 )
cpu_target.move_to(_snake_case )
cpu_target.generate_target()
A_ = 0.46 / 4
A_ = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_snake_case )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=_snake_case , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=_snake_case , buff=0.0 )
cpu_targs.append(_snake_case )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_snake_case ) )
second_animations.append(MoveToTarget(_snake_case , run_time=1.5 ) )
self.play(*_snake_case )
self.play(*_snake_case )
self.wait()
| 708 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
assert (
isinstance(UpperCAmelCase__, UpperCAmelCase__ ) and number_of_steps > 0
    ), F'''number_of_steps needs to be a positive integer, your input {number_of_steps}'''
if number_of_steps == 1:
return 1
A_ , A_ = 1, 1
for _ in range(number_of_steps - 1 ):
A_ , A_ = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
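# Sanity sketch (added; the file ships no doctests for testmod to pick up):
# the function above counts the ways to climb n steps taking 1 or 2 at a
# time, i.e. Fibonacci numbers 1, 2, 3, 5, 8, ... Restated without the
# obfuscated names:
def climb(number_of_steps: int) -> int:
    current, previous = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current

assert [climb(n) for n in range(1, 6)] == [1, 2, 3, 5, 8]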
| 667 | 0 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[int]:
A_ = current_set.copy()
for row_index, row in enumerate(UpperCAmelCase__ ):
A_ = row[0]
for column_index, column in enumerate(UpperCAmelCase__ ):
if magnitude == 0:
A_ = column
continue
A_ = column / magnitude
# Subtract to cancel term
A_ = current_set[0]
A_ = [first_row]
A_ = current_set[1::]
for row in current_set:
A_ = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(UpperCAmelCase__ )
continue
for column_index in range(len(UpperCAmelCase__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(UpperCAmelCase__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
A_ = final_set[0]
A_ = []
A_ = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
A_ = simplify(UpperCAmelCase__ )
for i in range(len(UpperCAmelCase__ ) ):
resultant[i].insert(0, current_first_column[i] )
resultant.insert(0, UpperCAmelCase__ )
A_ = resultant
return final_set
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
if len(UpperCAmelCase__ ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
A_ = len(UpperCAmelCase__ ) + 1
if any(len(UpperCAmelCase__ ) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
if any(not isinstance(UpperCAmelCase__, (int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(UpperCAmelCase__ ) == 1:
return [equations[0][-1] / equations[0][0]]
A_ = equations.copy()
if any(0 in row for row in data_set ):
A_ = data_set.copy()
A_ = []
for row_index, row in enumerate(UpperCAmelCase__ ):
if 0 not in row:
A_ = data_set.pop(UpperCAmelCase__ )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0, UpperCAmelCase__ )
A_ = data_set.copy()
A_ = simplify(UpperCAmelCase__ )
A_ = simplified[::-1]
A_ = []
for row in simplified:
A_ = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
A_ = row.copy()[: len(UpperCAmelCase__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(UpperCAmelCase__ ) == 0:
solutions.append(0 )
continue
A_ = temp_row[1::]
A_ = temp_row[::-1]
for column_index, column in enumerate(UpperCAmelCase__ ):
current_solution -= column * solutions[column_index]
solutions.append(UpperCAmelCase__ )
A_ = []
for item in solutions:
final.append(float(round(UpperCAmelCase__, 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCamelCase = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 709 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
return str(UpperCAmelCase__ ) == str(UpperCAmelCase__ )[::-1]
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
return int(UpperCAmelCase__ ) + int(str(UpperCAmelCase__ )[::-1] )
def UpperCAmelCase__ ( UpperCAmelCase__ = 1_00_00 ) -> int:
A_ = []
for num in range(1, UpperCAmelCase__ ):
A_ = 0
A_ = num
while iterations < 50:
A_ = sum_reverse(UpperCAmelCase__ )
iterations += 1
if is_palindrome(UpperCAmelCase__ ):
break
else:
lychrel_nums.append(UpperCAmelCase__ )
return len(UpperCAmelCase__ )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A__ ( UpperCamelCase__ ):
lowercase = ["image_processor", "tokenizer"]
lowercase = "AutoImageProcessor"
lowercase = "AutoTokenizer"
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
super().__init__(_a , _a )
A_ = self.image_processor
def __call__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
A_ = self.tokenizer(_a , return_tensors=_a , **_a )
if images is not None:
A_ = self.image_processor(_a , return_tensors=_a , **_a )
if text is not None and images is not None:
A_ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_a ) , tensor_type=_a )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
return self.tokenizer.batch_decode(*_a , **_a )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.decode(*_a , **_a )
@property
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
| 710 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[int]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> List[Any]:
# word like '180' or '身高' or '神'
for char in word:
A_ = ord(UpperCAmelCase__ )
if not _is_chinese_char(UpperCAmelCase__ ):
return 0
return 1
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = set()
for token in tokens:
A_ = len(UpperCAmelCase__ ) > 1 and is_chinese(UpperCAmelCase__ )
if chinese_word:
word_set.add(UpperCAmelCase__ )
A_ = list(UpperCAmelCase__ )
return word_list
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
if not chinese_word_set:
return bert_tokens
A_ = max([len(UpperCAmelCase__ ) for w in chinese_word_set] )
A_ = bert_tokens
A_ , A_ = 0, len(UpperCAmelCase__ )
while start < end:
A_ = True
if is_chinese(bert_word[start] ):
A_ = min(end - start, UpperCAmelCase__ )
for i in range(UpperCAmelCase__, 1, -1 ):
A_ = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1, start + i ):
A_ = """##""" + bert_word[j]
A_ = start + i
A_ = False
break
if single_word:
start += 1
return bert_word
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
A_ = []
for i in range(0, len(UpperCAmelCase__ ), 1_00 ):
A_ = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=["""cws"""] ).cws
A_ = [get_chinese_word(UpperCAmelCase__ ) for r in res]
ltp_res.extend(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
A_ = []
for i in range(0, len(UpperCAmelCase__ ), 1_00 ):
A_ = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=UpperCAmelCase__, truncation=UpperCAmelCase__, max_length=5_12 )
bert_res.extend(res["""input_ids"""] )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
A_ = []
for input_ids, chinese_word in zip(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = []
for id in input_ids:
A_ = bert_tokenizer._convert_id_to_token(UpperCAmelCase__ )
input_tokens.append(UpperCAmelCase__ )
A_ = add_sub_symbol(UpperCAmelCase__, UpperCAmelCase__ )
A_ = []
        # We only save the positions of Chinese subwords that start with "##", meaning they are part of a whole word.
for i, token in enumerate(UpperCAmelCase__ ):
if token[:2] == "##":
A_ = token[2:]
# save chinese tokens' pos
if len(UpperCAmelCase__ ) == 1 and _is_chinese_char(ord(UpperCAmelCase__ ) ):
ref_id.append(UpperCAmelCase__ )
ref_ids.append(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
return ref_ids
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[Any]:
    # For Chinese (Ro)BERT, the best result is from: RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name, """r""", encoding="""utf-8""" ) as f:
A_ = f.readlines()
A_ = [line.strip() for line in data if len(UpperCAmelCase__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    A_ = LTP(args.ltp )  # faster on a GPU device
A_ = BertTokenizer.from_pretrained(args.bert )
A_ = prepare_ref(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
with open(args.save_path, """w""", encoding="""utf-8""" ) as f:
A_ = [json.dumps(UpperCAmelCase__ ) + """\n""" for ref in ref_ids]
f.writelines(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
__lowerCamelCase = parser.parse_args()
main(args)
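# Tiny self-contained illustration (added) of the whole-word-masking prep
# above: characters that continue an LTP-segmented word get the "##" prefix,
# so a masking pass can later cover whole words. Simplified logic:
def mark_subwords(chars, words):
    out, i = list(chars), 0
    while i < len(out):
        for word in sorted(words, key=len, reverse=True):  # longest match first
            if "".join(chars[i : i + len(word)]) == word:
                for j in range(i + 1, i + len(word)):
                    out[j] = "##" + out[j]
                i += len(word) - 1
                break
        i += 1
    return out

assert mark_subwords(list("你好世界"), {"你好", "世界"}) == ["你", "##好", "世", "##界"]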
| 667 | 0 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
__lowerCamelCase = object()
# For specifying empty leaf dict `{}`
__lowerCamelCase = object()
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> List[str]:
A_ = tuple((re.compile(x + """$""" ) for x in qs) )
for i in range(len(__UpperCamelCase ) - len(__UpperCamelCase ) + 1 ):
A_ = [x.match(__UpperCamelCase ) for x, y in zip(__UpperCamelCase, ks[i:] )]
if matches and all(__UpperCamelCase ):
return True
return False
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[int]:
def replace(UpperCAmelCase__, UpperCAmelCase__ ):
for rule, replacement in rules:
if _match(__UpperCamelCase, __UpperCamelCase ):
return replacement
return val
return replace
def UpperCAmelCase__ ( ) -> Dict:
return [
# embeddings
(("transformer", "wpe", "embedding"), P("""mp""", __UpperCamelCase )),
(("transformer", "wte", "embedding"), P("""mp""", __UpperCamelCase )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__UpperCamelCase, """mp""" )),
(("attention", "out_proj", "kernel"), P("""mp""", __UpperCamelCase )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__UpperCamelCase, """mp""" )),
(("mlp", "c_fc", "bias"), P("""mp""" )),
(("mlp", "c_proj", "kernel"), P("""mp""", __UpperCamelCase )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Any:
A_ = _get_partition_rules()
A_ = _replacement_rules(__UpperCamelCase )
A_ = {k: _unmatched for k in flatten_dict(__UpperCamelCase )}
A_ = {k: replace(__UpperCamelCase, __UpperCamelCase ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(__UpperCamelCase ) )
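# Minimal demonstration (added) of the rule matcher defined first above:
# a rule is a tuple of regexes matched against a contiguous window of the
# flattened parameter key, each pattern anchored with a trailing "$".
import re

def match_rule(qs, ks):
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qts) + 1):
        hits = [x.match(y) for x, y in zip(qts, ks[i:])]
        if hits and all(hits):
            return True
    return False

assert match_rule(("attention", "(q_proj|k_proj|v_proj)", "kernel"),
                  ("transformer", "h", "0", "attention", "q_proj", "kernel"))
assert not match_rule(("mlp", "c_fc", "kernel"),
                      ("transformer", "wte", "embedding"))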
| 711 |
'''simple docstring'''
from __future__ import annotations
import math
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5, int(math.sqrt(UpperCAmelCase__ ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
__lowerCamelCase = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> list[int]:
if not isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
A_ = []
for num in range(len(UpperCAmelCase__ ) ):
A_ = 0
while 2 * i * i <= odd_composites[num]:
A_ = odd_composites[num] - 2 * i * i
if is_prime(UpperCAmelCase__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(UpperCAmelCase__ ) == n:
return list_nums
return []
def UpperCAmelCase__ ( ) -> int:
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowercase = MobileBertTokenizer
lowercase = MobileBertTokenizerFast
lowercase = True
lowercase = True
lowercase = filter_non_english
lowercase = "google/mobilebert-uncased"
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
A_ = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
A_ = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def snake_case_ ( self , UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = """UNwant\u00E9d,running"""
A_ = """unwanted, running"""
return input_text, output_text
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = self.tokenizer_class(self.vocab_file )
A_ = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(_lowerCamelCase , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [9, 6, 7, 12, 10, 11] )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
A_ = self.get_tokenizer()
A_ = self.get_rust_tokenizer()
A_ = """UNwant\u00E9d,running"""
A_ = tokenizer.tokenize(_lowerCamelCase )
A_ = rust_tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
A_ = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A_ = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
A_ = self.get_rust_tokenizer()
A_ = tokenizer.encode(_lowerCamelCase )
A_ = rust_tokenizer.encode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
# With lower casing
A_ = self.get_tokenizer(do_lower_case=_lowerCamelCase )
A_ = self.get_rust_tokenizer(do_lower_case=_lowerCamelCase )
A_ = """UNwant\u00E9d,running"""
A_ = tokenizer.tokenize(_lowerCamelCase )
A_ = rust_tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
A_ = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A_ = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
A_ = self.get_rust_tokenizer()
A_ = tokenizer.encode(_lowerCamelCase )
A_ = rust_tokenizer.encode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = BasicTokenizer(do_lower_case=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = BasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = BasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = BasicTokenizer(do_lower_case=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = BasicTokenizer(do_lower_case=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = BasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = BasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = BasicTokenizer(do_lower_case=_lowerCamelCase , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
A_ = {}
for i, token in enumerate(_lowerCamelCase ):
A_ = i
A_ = WordpieceTokenizer(vocab=_lowerCamelCase , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = self.get_tokenizer()
A_ = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_lowerCamelCase ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(_lowerCamelCase ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" )
A_ = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowerCamelCase )
A_ = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowerCamelCase )
A_ = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase )
A_ = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A_ = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A_ = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
A_ = tokenizer_r.encode_plus(
_lowerCamelCase , return_attention_mask=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , add_special_tokens=_lowerCamelCase , )
A_ = tokenizer_r.do_lower_case if hasattr(_lowerCamelCase , """do_lower_case""" ) else False
A_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = ["""的""", """人""", """有"""]
A_ = """""".join(_lowerCamelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A_ = True
A_ = self.tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A_ = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A_ = tokenizer_p.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A_ = tokenizer_r.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A_ = tokenizer_r.convert_ids_to_tokens(_lowerCamelCase )
A_ = tokenizer_p.convert_ids_to_tokens(_lowerCamelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
A_ = False
A_ = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A_ = self.tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A_ = tokenizer_r.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A_ = tokenizer_p.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A_ = tokenizer_r.convert_ids_to_tokens(_lowerCamelCase )
A_ = tokenizer_p.convert_ids_to_tokens(_lowerCamelCase )
# it is expected that only the first Chinese character is not preceded by "##".
A_ = [
f'''##{token}''' if idx != 0 else token for idx, token in enumerate(_lowerCamelCase )
]
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
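# Self-contained sketch (added) of the greedy longest-match-first WordPiece
# algorithm the tests above exercise, run against a toy vocabulary:
def greedy_wordpiece(word, vocab, unk="[UNK]"):
    tokens, start = [], 0
    while start < len(word):
        end, piece = len(word), None
        while start < end:
            candidate = ("##" if start > 0 else "") + word[start:end]
            if candidate in vocab:
                piece = candidate
                break
            end -= 1
        if piece is None:  # no prefix matched: the whole word becomes [UNK]
            return [unk]
        tokens.append(piece)
        start = end
    return tokens

toy_vocab = {"un", "##want", "##ed", "runn", "##ing"}
assert greedy_wordpiece("unwanted", toy_vocab) == ["un", "##want", "##ed"]
assert greedy_wordpiece("unwantedX", toy_vocab) == ["[UNK]"]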
| 712 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ = 0, UpperCAmelCase__ = 0 ) -> int:
A_ = right or len(UpperCAmelCase__ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(UpperCAmelCase__, UpperCAmelCase__, left + 1, right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
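# Usage sketch (added; the definition above uses obfuscated parameter
# names): the function is a two-ended recursive linear search returning the
# key's index or -1. Restated:
def two_ended_search(data, key, left=0, right=0):
    right = right or len(data) - 1
    if left > right:
        return -1
    if data[left] == key:
        return left
    if data[right] == key:
        return right
    return two_ended_search(data, key, left + 1, right - 1)

assert two_ended_search([1, 2, 4, 8], 4) == 2
assert two_ended_search([1, 2, 4, 8], 5) == -1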
| 667 | 0 |
'''simple docstring'''
from collections.abc import Sequence
from queue import Queue
class A__ :
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None ) -> Tuple:
'''simple docstring'''
A_ = start
A_ = end
A_ = val
A_ = (start + end) // 2
A_ = left
A_ = right
def __repr__( self ) -> int:
'''simple docstring'''
return f'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class A__ :
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
A_ = collection
A_ = function
if self.collection:
A_ = self._build_tree(0 , len(UpperCamelCase__ ) - 1 )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
self._update_tree(self.root , UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
return self._query_range(self.root , UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
if start == end:
return SegmentTreeNode(UpperCamelCase__ , UpperCamelCase__ , self.collection[start] )
A_ = (start + end) // 2
A_ = self._build_tree(UpperCamelCase__ , UpperCamelCase__ )
A_ = self._build_tree(mid + 1 , UpperCamelCase__ )
return SegmentTreeNode(UpperCamelCase__ , UpperCamelCase__ , self.fn(left.val , right.val ) , UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if node.start == i and node.end == i:
A_ = val
return
if i <= node.mid:
self._update_tree(node.left , UpperCamelCase__ , UpperCamelCase__ )
else:
self._update_tree(node.right , UpperCamelCase__ , UpperCamelCase__ )
A_ = self.fn(node.left.val , node.right.val )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , UpperCamelCase__ , UpperCamelCase__ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , UpperCamelCase__ , node.mid ) , self._query_range(node.right , node.mid + 1 , UpperCamelCase__ ) , )
else:
# range in right child tree
return self._query_range(node.right , UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
if self.root is not None:
A_ = Queue()
queue.put(self.root )
while not queue.empty():
A_ = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
__lowerCamelCase = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 713 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = FileLock(str(tmpdir / """foo.lock""" ) )
A_ = FileLock(str(tmpdir / """foo.lock""" ) )
A_ = 0.01
with locka.acquire():
with pytest.raises(UpperCAmelCase__ ):
A_ = time.time()
locka.acquire(UpperCAmelCase__ )
assert time.time() - _start > timeout
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Dict:
A_ = """a""" * 10_00 + """.lock"""
A_ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(UpperCAmelCase__ )
assert len(os.path.basename(locka._lock_file ) ) <= 2_55
A_ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(UpperCAmelCase__ ):
locka.acquire(0 )
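# Minimal sketch (added) of the timeout behaviour tested above, using the
# same vendored filelock module: a second lock on an already-held file
# raises Timeout once the acquire timeout elapses.
import os
import tempfile
from datasets.utils.filelock import FileLock, Timeout

with tempfile.TemporaryDirectory() as tmpdir:
    lock_path = os.path.join(tmpdir, "demo.lock")
    with FileLock(lock_path):
        try:
            FileLock(lock_path).acquire(timeout=0.01)
        except Timeout:
            print("second acquire timed out, as expected")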
| 667 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class A__ ( __lowercase ):
lowercase = 'gpt_neox'
def __init__( self , UpperCamelCase__=50432 , UpperCamelCase__=6144 , UpperCamelCase__=44 , UpperCamelCase__=64 , UpperCamelCase__=24576 , UpperCamelCase__="gelu" , UpperCamelCase__=0.25 , UpperCamelCase__=10000 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.1 , UpperCamelCase__=2048 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-5 , UpperCamelCase__=True , UpperCamelCase__=0 , UpperCamelCase__=2 , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=None , **UpperCamelCase__ , ) -> str:
'''simple docstring'''
super().__init__(bos_token_id=__A , eos_token_id=__A , **__A )
A_ = vocab_size
A_ = max_position_embeddings
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = rotary_pct
A_ = rotary_emb_base
A_ = attention_dropout
A_ = hidden_dropout
A_ = classifier_dropout
A_ = initializer_range
A_ = layer_norm_eps
A_ = use_cache
A_ = tie_word_embeddings
A_ = use_parallel_residual
A_ = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"""The hidden size is not divisble by the number of attention heads! Make sure to update them!""" )
def snake_case_ ( self ) -> int:
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __A ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f'''got {self.rope_scaling}''' )
A_ = self.rope_scaling.get("""type""" , __A )
A_ = self.rope_scaling.get("""factor""" , __A )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(__A , __A ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
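# Standalone restatement (added) of the rope_scaling validation above: the
# dict must carry exactly a known "type" and a float "factor" > 1.
def validate_rope_scaling(rope_scaling):
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError("rope_scaling must be a dictionary with two fields")
    if rope_scaling.get("type") not in ("linear", "dynamic"):
        raise ValueError("rope_scaling type must be 'linear' or 'dynamic'")
    factor = rope_scaling.get("factor")
    if not isinstance(factor, float) or factor <= 1.0:
        raise ValueError("rope_scaling factor must be a float > 1")

validate_rope_scaling({"type": "linear", "factor": 2.0})  # passes silently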
| 714 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class A__ ( _snake_case ):
lowercase = 42
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=3 , UpperCamelCase__=("DownEncoderBlock2D",) , UpperCamelCase__=(64,) , UpperCamelCase__=2 , UpperCamelCase__=32 , UpperCamelCase__="silu" , UpperCamelCase__=True , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
A_ = layers_per_block
A_ = torch.nn.Convad(
UpperCamelCase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
A_ = None
A_ = nn.ModuleList([] )
# down
A_ = block_out_channels[0]
for i, down_block_type in enumerate(UpperCamelCase__ ):
A_ = output_channel
A_ = block_out_channels[i]
A_ = i == len(UpperCamelCase__ ) - 1
A_ = get_down_block(
UpperCamelCase__ , num_layers=self.layers_per_block , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
self.down_blocks.append(UpperCamelCase__ )
# mid
A_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# out
A_ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCamelCase__ , eps=1e-6 )
A_ = nn.SiLU()
A_ = 2 * out_channels if double_z else out_channels
A_ = nn.Convad(block_out_channels[-1] , UpperCamelCase__ , 3 , padding=1 )
A_ = False
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = x
A_ = self.conv_in(UpperCamelCase__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ ):
def custom_forward(*UpperCamelCase__ ):
return module(*UpperCamelCase__ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
for down_block in self.down_blocks:
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ )
# middle
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCamelCase__ )
else:
# down
for down_block in self.down_blocks:
A_ = down_block(UpperCamelCase__ )
# middle
A_ = self.mid_block(UpperCamelCase__ )
# post-process
A_ = self.conv_norm_out(UpperCamelCase__ )
A_ = self.conv_act(UpperCamelCase__ )
A_ = self.conv_out(UpperCamelCase__ )
return sample
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=3 , UpperCamelCase__=("UpDecoderBlock2D",) , UpperCamelCase__=(64,) , UpperCamelCase__=2 , UpperCamelCase__=32 , UpperCamelCase__="silu" , UpperCamelCase__="group" , ) -> List[Any]:
'''simple docstring'''
super().__init__()
A_ = layers_per_block
A_ = nn.Convad(
UpperCamelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
A_ = None
A_ = nn.ModuleList([] )
A_ = in_channels if norm_type == """spatial""" else None
# mid
A_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# up
A_ = list(reversed(UpperCamelCase__ ) )
A_ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCamelCase__ ):
A_ = output_channel
A_ = reversed_block_out_channels[i]
A_ = i == len(UpperCamelCase__ ) - 1
A_ = get_up_block(
UpperCamelCase__ , num_layers=self.layers_per_block + 1 , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , prev_output_channel=UpperCamelCase__ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , resnet_time_scale_shift=UpperCamelCase__ , )
self.up_blocks.append(UpperCamelCase__ )
A_ = output_channel
# out
if norm_type == "spatial":
A_ = SpatialNorm(block_out_channels[0] , UpperCamelCase__ )
else:
A_ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCamelCase__ , eps=1e-6 )
A_ = nn.SiLU()
A_ = nn.Convad(block_out_channels[0] , UpperCamelCase__ , 3 , padding=1 )
A_ = False
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=None ) -> Optional[Any]:
'''simple docstring'''
A_ = z
A_ = self.conv_in(UpperCamelCase__ )
A_ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ ):
def custom_forward(*UpperCamelCase__ ):
return module(*UpperCamelCase__ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
else:
# middle
A_ = self.mid_block(UpperCamelCase__ , UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = up_block(UpperCamelCase__ , UpperCamelCase__ )
# post-process
if latent_embeds is None:
A_ = self.conv_norm_out(UpperCamelCase__ )
else:
A_ = self.conv_norm_out(UpperCamelCase__ , UpperCamelCase__ )
A_ = self.conv_act(UpperCamelCase__ )
A_ = self.conv_out(UpperCamelCase__ )
return sample
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__="random" , UpperCamelCase__=False , UpperCamelCase__=True ) -> str:
'''simple docstring'''
super().__init__()
A_ = n_e
A_ = vq_embed_dim
A_ = beta
A_ = legacy
A_ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
A_ = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
A_ = self.used.shape[0]
A_ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A_ = self.re_embed
A_ = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
A_ = n_e
A_ = sane_index_shape
def snake_case_ ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = inds.shape
assert len(UpperCamelCase__ ) > 1
A_ = inds.reshape(ishape[0] , -1 )
A_ = self.used.to(UpperCamelCase__ )
A_ = (inds[:, :, None] == used[None, None, ...]).long()
A_ = match.argmax(-1 )
A_ = match.sum(2 ) < 1
if self.unknown_index == "random":
A_ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
A_ = self.unknown_index
return new.reshape(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
A_ = inds.shape
assert len(UpperCamelCase__ ) > 1
A_ = inds.reshape(ishape[0] , -1 )
A_ = self.used.to(UpperCamelCase__ )
if self.re_embed > self.used.shape[0]: # extra token
A_ = 0 # simply set to zero
A_ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCamelCase__ )
return back.reshape(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
# reshape z -> (batch, height, width, channel) and flatten
A_ = z.permute(0 , 2 , 3 , 1 ).contiguous()
A_ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A_ = torch.argmin(torch.cdist(UpperCamelCase__ , self.embedding.weight ) , dim=1 )
A_ = self.embedding(UpperCamelCase__ ).view(z.shape )
A_ = None
A_ = None
# compute loss for embedding
if not self.legacy:
A_ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A_ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
A_ = z + (z_q - z).detach()
# reshape back to match original input shape
A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
A_ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
A_ = self.remap_to_used(UpperCamelCase__ )
A_ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
A_ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A_ = indices.reshape(shape[0] , -1 ) # add batch axis
A_ = self.unmap_to_all(UpperCamelCase__ )
A_ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A_ = self.embedding(UpperCamelCase__ )
if shape is not None:
A_ = z_q.view(UpperCamelCase__ )
# reshape back to match original input shape
A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
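# A minimal sketch of the nearest-neighbour lookup at the heart of the
# quantizer forward above, on a tiny assumed 2-entry codebook: each row of z
# maps to the index of its closest codebook vector.
_demo_codebook = torch.tensor([[0.0, 0.0], [1.0, 1.0]])
_demo_z = torch.tensor([[0.1, 0.2], [0.9, 0.8]])
assert torch.argmin(torch.cdist(_demo_z, _demo_codebook), dim=1).tolist() == [0, 1]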
class A__ ( _snake_case ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__=False ) -> Dict:
'''simple docstring'''
A_ = parameters
A_ , A_ = torch.chunk(UpperCamelCase__ , 2 , dim=1 )
A_ = torch.clamp(self.logvar , -30.0 , 20.0 )
A_ = deterministic
A_ = torch.exp(0.5 * self.logvar )
A_ = torch.exp(self.logvar )
if self.deterministic:
A_ = A_ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def snake_case_ ( self , UpperCamelCase__ = None ) -> torch.FloatTensor:
'''simple docstring'''
# make sure sample is on the same device as the parameters and has same dtype
A_ = randn_tensor(
self.mean.shape , generator=UpperCamelCase__ , device=self.parameters.device , dtype=self.parameters.dtype )
A_ = self.mean + self.std * sample
return x
def snake_case_ ( self , UpperCamelCase__=None ) -> int:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=[1, 2, 3] ) -> Optional[Any]:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
A_ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCamelCase__ )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
return self.mean
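# A minimal numeric sketch of the KL term computed in the class above, for an
# assumed 2-D diagonal Gaussian (the class itself reduces over dim=[1, 2, 3]
# for image-shaped latents): KL(N(0, I) || N(0, I)) is exactly zero.
_demo_mean = torch.zeros(1, 2)
_demo_logvar = torch.zeros(1, 2)
_demo_kl = 0.5 * torch.sum(torch.pow(_demo_mean, 2) + torch.exp(_demo_logvar) - 1.0 - _demo_logvar, dim=[1])
assert torch.allclose(_demo_kl, torch.zeros(1))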
| 667 | 0 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> str:
    for i in range(len(UpperCAmelCase__ ) - 1, 0, -1 ):
        A_ = False
        for j in range(i, 0, -1 ):
            if unsorted[j] < unsorted[j - 1]:
                A_ , A_ = unsorted[j - 1], unsorted[j]
                A_ = True
        for j in range(i ):
if unsorted[j] > unsorted[j + 1]:
A_ , A_ = unsorted[j + 1], unsorted[j]
A_ = True
if not swapped:
break
return unsorted
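# A readable restatement of the routine above (the function and variable names
# here are assumptions), handy for checking the two alternating passes; it also
# backs the `cocktail_shaker_sort` call in the demo block below.
def cocktail_shaker_sort(values: list) -> list:
    for i in range(len(values) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):  # right-to-left pass bubbles the minimum down
            if values[j] < values[j - 1]:
                values[j], values[j - 1] = values[j - 1], values[j]
                swapped = True
        for j in range(i):  # left-to-right pass bubbles the maximum up
            if values[j] > values[j + 1]:
                values[j], values[j + 1] = values[j + 1], values[j]
                swapped = True
        if not swapped:
            break
    return values
assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]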
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
__lowerCamelCase = [int(item) for item in user_input.split(''',''')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 715 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
# Load configuration defined in the metadata file
with open(UpperCAmelCase__ ) as metadata_file:
A_ = json.load(UpperCAmelCase__ )
A_ = LukeConfig(use_entity_aware_attention=UpperCAmelCase__, **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
A_ = torch.load(UpperCAmelCase__, map_location="""cpu""" )["""module"""]
# Load the entity vocab file
A_ = load_original_entity_vocab(UpperCAmelCase__ )
# add an entry for [MASK2]
A_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
A_ = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
A_ = AddedToken("""<ent>""", lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ )
A_ = AddedToken("""<ent2>""", lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__, """tokenizer_config.json""" ), """r""" ) as f:
A_ = json.load(UpperCAmelCase__ )
A_ = """MLukeTokenizer"""
with open(os.path.join(UpperCAmelCase__, """tokenizer_config.json""" ), """w""" ) as f:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__, MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ), """w""" ) as f:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__ )
# Initialize the embeddings of the special tokens
A_ = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
A_ = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
A_ = state_dict["""embeddings.word_embeddings.weight"""]
A_ = word_emb[ent_init_index].unsqueeze(0 )
A_ = word_emb[enta_init_index].unsqueeze(0 )
A_ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
A_ = state_dict[bias_name]
A_ = decoder_bias[ent_init_index].unsqueeze(0 )
A_ = decoder_bias[enta_init_index].unsqueeze(0 )
A_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
A_ = F'''encoder.layer.{layer_index}.attention.self.'''
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
A_ = state_dict["""entity_embeddings.entity_embeddings.weight"""]
A_ = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
A_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
A_ = state_dict["""entity_predictions.bias"""]
A_ = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
A_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
A_ = LukeForMaskedLM(config=UpperCAmelCase__ ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
A_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
A_ = state_dict[key]
else:
A_ = state_dict[key]
A_ , A_ = model.load_state_dict(UpperCAmelCase__, strict=UpperCAmelCase__ )
if set(UpperCAmelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(UpperCAmelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__, task="""entity_classification""" )
A_ = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
A_ = (0, 9)
A_ = tokenizer(UpperCAmelCase__, entity_spans=[span], return_tensors="""pt""" )
A_ = model(**UpperCAmelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
A_ = torch.Size((1, 33, 7_68) )
A_ = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCAmelCase__, atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
A_ = torch.Size((1, 1, 7_68) )
A_ = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], UpperCAmelCase__, atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__ )
A_ = """Tokyo is the capital of <mask>."""
A_ = (24, 30)
A_ = tokenizer(UpperCAmelCase__, entity_spans=[span], return_tensors="""pt""" )
A_ = model(**UpperCAmelCase__ )
A_ = encoding["""input_ids"""][0].tolist()
A_ = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
A_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(UpperCAmelCase__ )
A_ = outputs.entity_logits[0][0].argmax().item()
A_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(UpperCAmelCase__ ) )
model.save_pretrained(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = ["""[MASK]""", """[PAD]""", """[UNK]"""]
A_ = [json.loads(UpperCAmelCase__ ) for line in open(UpperCAmelCase__ )]
A_ = {}
for entry in data:
A_ = entry["""id"""]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
A_ = entity_id
break
A_ = F'''{language}:{entity_name}'''
A_ = entity_id
return new_mapping
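# A minimal sketch of the per-line mapping built above (helper name assumed):
# each line is a JSON record with an "id" and (name, language) pairs; special
# tokens are stored bare, everything else is prefixed with its language code.
def _load_entry(line):
    entry = json.loads(line)
    mapping = {}
    for entity_name, language in entry["entities"]:
        if entity_name in ["[MASK]", "[PAD]", "[UNK]"]:
            mapping[entity_name] = entry["id"]
            break
        mapping[f"{language}:{entity_name}"] = entry["id"]
    return mapping
assert _load_entry('{"id": 7, "entities": [["Japan", "en"]]}') == {"en:Japan": 7}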
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
__lowerCamelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 667 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__lowerCamelCase = logging.getLogger(__name__)
@dataclass(frozen=__UpperCAmelCase )
class A__ :
lowercase = 42
lowercase = 42
lowercase = None
lowercase = None
lowercase = None
@dataclass(frozen=__UpperCAmelCase )
class A__ :
lowercase = 42
lowercase = None
lowercase = None
lowercase = None
lowercase = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class A__ ( __UpperCAmelCase ):
lowercase = 42
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__=False , UpperCamelCase__ = False , ) -> List[Any]:
'''simple docstring'''
A_ = hans_processors[task]()
A_ = os.path.join(
__SCREAMING_SNAKE_CASE , """cached_{}_{}_{}_{}""".format(
"""dev""" if evaluate else """train""" , tokenizer.__class__.__name__ , str(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , ) , )
A_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
A_ , A_ = label_list[2], label_list[1]
A_ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A_ = cached_features_file + """.lock"""
with FileLock(__SCREAMING_SNAKE_CASE ):
if os.path.exists(__SCREAMING_SNAKE_CASE ) and not overwrite_cache:
logger.info(f'''Loading features from cached file {cached_features_file}''' )
A_ = torch.load(__SCREAMING_SNAKE_CASE )
else:
logger.info(f'''Creating features from dataset file at {data_dir}''' )
A_ = (
processor.get_dev_examples(__SCREAMING_SNAKE_CASE ) if evaluate else processor.get_train_examples(__SCREAMING_SNAKE_CASE )
)
logger.info("""Training examples: %s""" , len(__SCREAMING_SNAKE_CASE ) )
A_ = hans_convert_examples_to_features(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
logger.info("""Saving features into cached file %s""" , __SCREAMING_SNAKE_CASE )
torch.save(self.features , __SCREAMING_SNAKE_CASE )
def __len__( self ) -> str:
'''simple docstring'''
return len(self.features )
def __getitem__( self , UpperCamelCase__ ) -> Any:
'''simple docstring'''
return self.features[i]
def snake_case_ ( self ) -> int:
'''simple docstring'''
return self.label_list
if is_tf_available():
import tensorflow as tf
class A__ :
lowercase = 42
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 128 , UpperCamelCase__=False , UpperCamelCase__ = False , ) -> List[Any]:
'''simple docstring'''
A_ = hans_processors[task]()
A_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
A_ , A_ = label_list[2], label_list[1]
A_ = label_list
A_ = processor.get_dev_examples(__SCREAMING_SNAKE_CASE ) if evaluate else processor.get_train_examples(__SCREAMING_SNAKE_CASE )
A_ = hans_convert_examples_to_features(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="""convert examples to features""" ):
if ex_index % 10000 == 0:
logger.info("""Writing example %d of %d""" % (ex_index, len(__SCREAMING_SNAKE_CASE )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
A_ = tf.data.Dataset.from_generator(
__SCREAMING_SNAKE_CASE , (
{
"""example_id""": tf.intaa,
"""input_ids""": tf.intaa,
"""attention_mask""": tf.intaa,
"""token_type_ids""": tf.intaa,
},
tf.intaa,
) , (
{
"""example_id""": tf.TensorShape([] ),
"""input_ids""": tf.TensorShape([None, None] ),
"""attention_mask""": tf.TensorShape([None, None] ),
"""token_type_ids""": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.dataset
def __len__( self ) -> List[Any]:
'''simple docstring'''
return len(self.features )
def __getitem__( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
return self.features[i]
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
return self.label_list
class A__ ( __UpperCAmelCase ):
def snake_case_ ( self , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(__SCREAMING_SNAKE_CASE , """heuristics_train_set.txt""" ) ) , """train""" )
def snake_case_ ( self , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(__SCREAMING_SNAKE_CASE , """heuristics_evaluation_set.txt""" ) ) , """dev""" )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
return ["contradiction", "entailment", "neutral"]
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = []
for i, line in enumerate(__SCREAMING_SNAKE_CASE ):
if i == 0:
continue
A_ = """%s-%s""" % (set_type, line[0])
A_ = line[5]
A_ = line[6]
A_ = line[7][2:] if line[7].startswith("""ex""" ) else line[7]
A_ = line[0]
examples.append(InputExample(guid=__SCREAMING_SNAKE_CASE , text_a=__SCREAMING_SNAKE_CASE , text_b=__SCREAMING_SNAKE_CASE , label=__SCREAMING_SNAKE_CASE , pairID=__SCREAMING_SNAKE_CASE ) )
return examples
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, ) -> str:
A_ = {label: i for i, label in enumerate(_UpperCAmelCase )}
A_ = []
for ex_index, example in tqdm.tqdm(enumerate(_UpperCAmelCase ), desc="""convert examples to features""" ):
if ex_index % 1_00_00 == 0:
logger.info("""Writing example %d""" % (ex_index) )
A_ = tokenizer(
example.text_a, example.text_b, add_special_tokens=_UpperCAmelCase, max_length=_UpperCAmelCase, padding="""max_length""", truncation=_UpperCAmelCase, return_overflowing_tokens=_UpperCAmelCase, )
A_ = label_map[example.label] if example.label in label_map else 0
A_ = int(example.pairID )
features.append(InputFeatures(**_UpperCAmelCase, label=_UpperCAmelCase, pairID=_UpperCAmelCase ) )
for i, example in enumerate(examples[:5] ):
logger.info("""*** Example ***""" )
logger.info(F'''guid: {example}''' )
logger.info(F'''features: {features[i]}''' )
return features
__lowerCamelCase = {
'''hans''': 3,
}
__lowerCamelCase = {
'''hans''': HansProcessor,
}
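# A minimal sketch of the label mapping used inside the feature converter
# above: each label maps to its index in the processor's label list, and
# unknown labels fall back to 0 (the demo values are illustrative).
_demo_labels = ["contradiction", "entailment", "neutral"]
_demo_label_map = {label: i for i, label in enumerate(_demo_labels)}
assert _demo_label_map["entailment"] == 1
assert (_demo_label_map["non-entailment"] if "non-entailment" in _demo_label_map else 0) == 0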
| 716 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A__ ( _snake_case ):
lowercase = "ClapFeatureExtractor"
lowercase = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = kwargs.pop("""sampling_rate""" , UpperCamelCase__ )
if text is None and audios is None:
raise ValueError("""You have to specify either text or audios. Both cannot be none.""" )
if text is not None:
A_ = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if audios is not None:
A_ = self.feature_extractor(
UpperCamelCase__ , sampling_rate=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if text is not None and audios is not None:
A_ = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = self.tokenizer.model_input_names
A_ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
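# Hedged usage sketch, left commented out because it downloads a checkpoint;
# the checkpoint name and sampling rate are assumptions for illustration:
#
#     from transformers import ClapProcessor
#     processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#     inputs = processor(
#         text=["a dog barking"], audios=[raw_audio], sampling_rate=48_000, return_tensors="pt"
#     )
#     # `inputs` now carries the tokenizer fields plus "input_features"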
| 667 | 0 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {'''vocab_file''': '''spiece.model'''}
__lowerCamelCase = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
}
}
__lowerCamelCase = {
'''google/bigbird-roberta-base''': 4096,
'''google/bigbird-roberta-large''': 4096,
'''google/bigbird-base-trivia-itc''': 4096,
}
class A__ ( UpperCamelCase_ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ["input_ids", "attention_mask"]
lowercase = []
def __init__( self , UpperCamelCase__ , UpperCamelCase__="<unk>" , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__="<pad>" , UpperCamelCase__="[SEP]" , UpperCamelCase__="[MASK]" , UpperCamelCase__="[CLS]" , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> None:
'''simple docstring'''
A_ = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else bos_token
A_ = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token
A_ = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token
A_ = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token
A_ = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else cls_token
A_ = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else sep_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
A_ = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
A_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__a , eos_token=__a , unk_token=__a , pad_token=__a , sep_token=__a , mask_token=__a , cls_token=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
A_ = vocab_file
A_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__a )
@property
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
return self.sp_model.get_piece_size()
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Tuple:
'''simple docstring'''
A_ = self.__dict__.copy()
A_ = None
return state
def __setstate__( self , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
A_ = {}
A_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case_ ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(__a , out_type=__a )
def snake_case_ ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return self.sp_model.piece_to_id(__a )
def snake_case_ ( self , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
A_ = self.sp_model.IdToPiece(__a )
return token
def snake_case_ ( self , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
A_ = []
A_ = ''
A_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__a ) + token
A_ = True
A_ = []
else:
current_sub_tokens.append(__a )
A_ = False
out_string += self.sp_model.decode(__a )
return out_string.strip()
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = True , **UpperCamelCase__ , ) -> str:
'''simple docstring'''
A_ = kwargs.pop("""use_source_tokenizer""" , __a )
A_ = self.convert_ids_to_tokens(__a , skip_special_tokens=__a )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
A_ = []
A_ = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__a ) )
A_ = []
sub_texts.append(__a )
else:
current_sub_text.append(__a )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__a ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
A_ = re.sub(R""" (\[(MASK|SEP)\])""" , R"""\1""" , """ """.join(__a ) )
else:
A_ = ''.join(__a )
A_ = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
A_ = self.clean_up_tokenization(__a )
return clean_text
else:
return text
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A_ = os.path.join(
__a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __a )
elif not os.path.isfile(self.vocab_file ):
with open(__a , """wb""" ) as fi:
A_ = self.sp_model.serialized_model_proto()
fi.write(__a )
return (out_vocab_file,)
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ = [self.cls_token_id]
A_ = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
if token_ids_a is None:
return [1] + ([0] * len(__a )) + [1]
return [1] + ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1]
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
'''simple docstring'''
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
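# A minimal sketch of the sentence-pair layout produced by the builders above:
# [CLS] A [SEP] B [SEP], with token_type_ids of 0 over the first segment and 1
# over the second (the id values below are assumptions).
_demo_cls, _demo_sep = [65], [66]
_demo_a, _demo_b = [7, 8], [9]
_demo_type_ids = len(_demo_cls + _demo_a + _demo_sep) * [0] + len(_demo_b + _demo_sep) * [1]
assert _demo_type_ids == [0, 0, 0, 0, 1, 1]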
| 717 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__lowerCamelCase = imread(r'''digital_image_processing/image_data/lena_small.jpg''')
__lowerCamelCase = cvtColor(img, COLOR_BGR2GRAY)
def UpperCAmelCase__ ( ) -> Dict:
A_ = cn.convert_to_negative(UpperCAmelCase__ )
# assert negative_img array for at least one True
assert negative_img.any()
def UpperCAmelCase__ ( ) -> List[Any]:
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(UpperCAmelCase__, 1_10 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def UpperCAmelCase__ ( ) -> str:
A_ = canny.gen_gaussian_kernel(9, sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def UpperCAmelCase__ ( ) -> Union[str, Any]:
A_ = imread("""digital_image_processing/image_data/lena_small.jpg""", 0 )
# assert ambiguous array for all == True
assert canny_img.all()
A_ = canny.canny(UpperCAmelCase__ )
# assert canny array for at least one True
assert canny_array.any()
def UpperCAmelCase__ ( ) -> Dict:
assert gg.gaussian_filter(UpperCAmelCase__, 5, sigma=0.9 ).all()
def UpperCAmelCase__ ( ) -> int:
# laplace diagonals
A_ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
A_ = conv.img_convolve(UpperCAmelCase__, UpperCAmelCase__ ).astype(UpperCAmelCase__ )
assert res.any()
def UpperCAmelCase__ ( ) -> List[Any]:
assert med.median_filter(UpperCAmelCase__, 3 ).any()
def UpperCAmelCase__ ( ) -> List[Any]:
A_ , A_ = sob.sobel_filter(UpperCAmelCase__ )
assert grad.any() and theta.any()
def UpperCAmelCase__ ( ) -> List[str]:
A_ = sp.make_sepia(UpperCAmelCase__, 20 )
assert sepia.all()
def UpperCAmelCase__ ( UpperCAmelCase__ = "digital_image_processing/image_data/lena_small.jpg" ) -> List[Any]:
A_ = bs.Burkes(imread(UpperCAmelCase__, 1 ), 1_20 )
burkes.process()
assert burkes.output_img.any()
def UpperCAmelCase__ ( UpperCAmelCase__ = "digital_image_processing/image_data/lena_small.jpg", ) -> Optional[int]:
A_ = rs.NearestNeighbour(imread(UpperCAmelCase__, 1 ), 4_00, 2_00 )
nn.process()
assert nn.output.any()
def UpperCAmelCase__ ( ) -> Optional[int]:
A_ = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
A_ = imread(UpperCAmelCase__, 0 )
# Test for get_neighbors_pixel function() return not None
A_ = 0
A_ = 0
A_ = image[x_coordinate][y_coordinate]
A_ = lbp.get_neighbors_pixel(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
A_ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0, image.shape[0] ):
for j in range(0, image.shape[1] ):
A_ = lbp.local_binary_value(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
assert lbp_image.any()
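# A minimal sketch of the assertion pattern used throughout the tests above:
# numpy truthiness via .any()/.all() over an image-shaped array (the demo
# values are illustrative).
_demo_img = np.zeros((2, 2), dtype=np.uint8)
_demo_img[1, 1] = 255
assert _demo_img.any() and not _demo_img.all()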
| 667 | 0 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class A__ :
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=99 , UpperCamelCase__=64 , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=16 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=None , ) -> str:
'''simple docstring'''
A_ = parent
A_ = batch_size
A_ = seq_length
A_ = is_training
A_ = use_input_mask
A_ = use_token_type_ids
A_ = use_labels
A_ = vocab_size
A_ = hidden_size
A_ = embedding_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = type_vocab_size
A_ = type_sequence_label_size
A_ = initializer_range
A_ = num_labels
A_ = num_choices
A_ = scope
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = ids_tensor([self.batch_size] , self.num_choices )
A_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__A , initializer_range=self.initializer_range , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = MegatronBertModel(config=__A )
model.to(__A )
model.eval()
A_ = model(__A , attention_mask=__A , token_type_ids=__A )
A_ = model(__A , token_type_ids=__A )
A_ = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
A_ = MegatronBertForMaskedLM(config=__A )
model.to(__A )
model.eval()
A_ = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = MegatronBertForCausalLM(config=__A )
model.to(__A )
model.eval()
A_ = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = MegatronBertForNextSentencePrediction(config=__A )
model.to(__A )
model.eval()
A_ = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
A_ = MegatronBertForPreTraining(config=__A )
model.to(__A )
model.eval()
A_ = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , next_sentence_label=__A , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = MegatronBertForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
A_ = model(
__A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.num_labels
A_ = MegatronBertForSequenceClassification(__A )
model.to(__A )
model.eval()
A_ = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.num_labels
A_ = MegatronBertForTokenClassification(config=__A )
model.to(__A )
model.eval()
A_ = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
A_ = self.num_choices
A_ = MegatronBertForMultipleChoice(config=__A )
model.to(__A )
model.eval()
A_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A_ = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = self.prepare_config_and_inputs()
(
(
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) ,
) = config_and_inputs
A_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class A__ ( _snake_case , _snake_case , unittest.TestCase ):
lowercase = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = True
# test_resize_embeddings = False
lowercase = False
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ) -> Dict:
'''simple docstring'''
A_ = super()._prepare_for_class(__A , __A , return_labels=__A )
if return_labels:
if model_class in get_values(__A ):
A_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__A )
A_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
return inputs_dict
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = MegatronBertModelTester(self )
A_ = ConfigTester(self , config_class=__A , hidden_size=37 )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__A )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__A )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__A )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__A )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__A )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__A )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__A )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__A )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Dict:
return torch.tensor(
_lowercase, dtype=torch.long, device=_lowercase, )
__lowerCamelCase = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
@slow
@unittest.skip("""Model is not available.""" )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = """nvidia/megatron-bert-uncased-345m"""
if "MYDIR" in os.environ:
A_ = os.path.join(os.environ["""MYDIR"""] , __A )
A_ = MegatronBertModel.from_pretrained(__A )
model.to(__A )
model.half()
A_ = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
with torch.no_grad():
A_ = model(__A )[0]
A_ = torch.Size((1, 9, 1024) )
self.assertEqual(output.shape , __A )
A_ = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
A_ = output[0, ii, jj]
A_ = expected[3 * ii + jj]
A_ = """ii={} jj={} a={} b={}""".format(__A , __A , __A , __A )
self.assertTrue(math.isclose(__A , __A , rel_tol=__A , abs_tol=__A ) , msg=__A )
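# A minimal sketch of the element-wise closeness check in the loop above,
# using the module-level tolerance of 1e-4 (operand values are illustrative).
assert math.isclose(0.60401, 0.6040, rel_tol=1e-4, abs_tol=1e-4)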
| 718 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> float:
_validate_point(UpperCAmelCase__ )
_validate_point(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(a - b ) for a, b in zip(UpperCAmelCase__, UpperCAmelCase__ ) ) )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> None:
if point:
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
for item in point:
if not isinstance(UpperCAmelCase__, (int, float) ):
A_ = (
"""Expected a list of numbers as input, found """
F'''{type(UpperCAmelCase__ ).__name__}'''
)
raise TypeError(UpperCAmelCase__ )
else:
A_ = F'''Expected a list of numbers as input, found {type(UpperCAmelCase__ ).__name__}'''
raise TypeError(UpperCAmelCase__ )
else:
raise ValueError("""Missing an input""" )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> float:
_validate_point(UpperCAmelCase__ )
_validate_point(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(x - y ) for x, y in zip(UpperCAmelCase__, UpperCAmelCase__ ) ) )
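# A readable restatement of the distance above (names assumed): the L1, or
# "taxicab", distance is the sum of coordinate-wise absolute differences.
def manhattan_distance(point_a: list, point_b: list) -> float:
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))
assert manhattan_distance([1, 1], [2, 2]) == 2.0
assert manhattan_distance([1.5, 1.5], [6.5, 0.5]) == 6.0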
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 719 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class A__ ( _snake_case ):
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> None:
'''simple docstring'''
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , UpperCamelCase__ , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
| 667 | 0 |
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
A_ = {}
A_ = job["""started_at"""]
A_ = job["""completed_at"""]
A_ = date_parser.parse(lowercase_ )
A_ = date_parser.parse(lowercase_ )
A_ = round((end_datetime - start_datetime).total_seconds() / 60.0 )
A_ = start
A_ = end
A_ = duration_in_min
return job_info
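# A readable restatement of the per-job extractor above (names assumed),
# checked against a hypothetical payload shaped like the GitHub Actions API
# response.
def _extract_time(job):
    start = date_parser.parse(job["started_at"])
    end = date_parser.parse(job["completed_at"])
    return {"started_at": start, "completed_at": end, "duration": round((end - start).total_seconds() / 60.0)}
assert _extract_time(
    {"started_at": "2023-01-01T00:00:00Z", "completed_at": "2023-01-01T00:30:00Z"}
)["duration"] == 30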
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__=None ) -> Tuple:
A_ = None
if token is not None:
A_ = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''}
A_ = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
A_ = requests.get(lowercase_, headers=lowercase_ ).json()
A_ = {}
try:
job_time.update({job["""name"""]: extract_time_from_single_job(lowercase_ ) for job in result["""jobs"""]} )
A_ = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(lowercase_ ):
A_ = requests.get(url + F'''&page={i + 2}''', headers=lowercase_ ).json()
job_time.update({job["""name"""]: extract_time_from_single_job(lowercase_ ) for job in result["""jobs"""]} )
return job_time
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = get_job_time(args.workflow_run_id)
__lowerCamelCase = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"""{k}: {v['duration']}""")
| 720 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
if num < 0:
return False
A_ = num
A_ = 0
while num > 0:
A_ = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
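# A readable restatement of the check above (names assumed): peel off the last
# decimal digit each iteration to build the reversal, then compare with the
# untouched copy.
def is_palindrome(num: int) -> bool:
    if num < 0:
        return False
    num_copy, rev_num = num, 0
    while num > 0:
        rev_num = rev_num * 10 + num % 10
        num //= 10
    return num_copy == rev_num
assert is_palindrome(121) and not is_palindrome(123)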
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
__lowerCamelCase = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 721 |
'''simple docstring'''
__lowerCamelCase = range(2, 20 + 1)
__lowerCamelCase = [10**k for k in range(ks[-1] + 1)]
__lowerCamelCase = {}
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Tuple:
A_ = sum(a_i[j] for j in range(UpperCAmelCase__, len(UpperCAmelCase__ ) ) )
A_ = sum(a_i[j] * base[j] for j in range(min(len(UpperCAmelCase__ ), UpperCAmelCase__ ) ) )
A_ , A_ = 0, 0
A_ = n - i
A_ = memo.get(UpperCAmelCase__ )
if sub_memo is not None:
A_ = sub_memo.get(UpperCAmelCase__ )
if jumps is not None and len(UpperCAmelCase__ ) > 0:
# find and make the largest jump without going over
A_ = -1
for _k in range(len(UpperCAmelCase__ ) - 1, -1, -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
A_ = _k
break
if max_jump >= 0:
A_ , A_ , A_ = jumps[max_jump]
# since the difference between jumps is cached, add c
A_ = diff + c
for j in range(min(UpperCAmelCase__, len(UpperCAmelCase__ ) ) ):
A_ , A_ = divmod(UpperCAmelCase__, 10 )
if new_c > 0:
add(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ = []
else:
A_ = {c: []}
A_ = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
A_ , A_ = next_term(UpperCAmelCase__, k - 1, i + dn, UpperCAmelCase__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
A_ , A_ = compute(UpperCAmelCase__, UpperCAmelCase__, i + dn, UpperCAmelCase__ )
diff += _diff
dn += terms_jumped
A_ = sub_memo[c]
# keep jumps sorted by # of terms skipped
A_ = 0
while j < len(UpperCAmelCase__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(UpperCAmelCase__, (diff, dn, k) )
return (diff, dn)
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> int:
if i >= n:
return 0, i
if k > len(UpperCAmelCase__ ):
a_i.extend([0 for _ in range(k - len(UpperCAmelCase__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
A_ = i
A_ , A_ , A_ = 0, 0, 0
for j in range(len(UpperCAmelCase__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
A_ = ds_c + ds_b
diff += addend
A_ = 0
for j in range(UpperCAmelCase__ ):
A_ = a_i[j] + addend
A_ , A_ = divmod(UpperCAmelCase__, 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
return diff, i - start_i
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> str:
for j in range(UpperCAmelCase__, len(UpperCAmelCase__ ) ):
A_ = digits[j] + addend
if s >= 10:
A_ , A_ = divmod(UpperCAmelCase__, 10 )
A_ = addend // 10 + quotient
else:
A_ = s
A_ = addend // 10
if addend == 0:
break
while addend > 0:
A_ , A_ = divmod(UpperCAmelCase__, 10 )
digits.append(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ = 10**15 ) -> int:
A_ = [1]
A_ = 1
A_ = 0
while True:
A_ , A_ = next_term(UpperCAmelCase__, 20, i + dn, UpperCAmelCase__ )
dn += terms_jumped
if dn == n - i:
break
A_ = 0
for j in range(len(UpperCAmelCase__ ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
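    # Naive reference for the sequence being solved (a(1) = 1, a(n+1) = a(n) + digitsum(a(n))):
    # fine for small n, hopeless for n = 10**15, which is why the solver above memoizes jumps.
    def naive_a(n ):
        term = 1
        for _ in range(n - 1 ):
            term += sum(int(d ) for d in str(term ) )
        return term
    assert [naive_a(i ) for i in range(1, 7 )] == [1, 2, 4, 8, 16, 23]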
| 667 | 0 |
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum( number: int ) -> int:
    if not isinstance(number, int ):
        raise TypeError("""Parameter number must be int""" )
    if number < 0:
        raise ValueError("""Parameter number must be greater than or equal to 0""" )
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number ) )
def solution( chain_length: int = 60, number_limit: int = 1_00_00_00 ) -> int:
    if not isinstance(chain_length, int ) or not isinstance(number_limit, int ):
        raise TypeError("""Parameters chain_length and number_limit must be int""" )
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            """Parameters chain_length and number_limit must be greater than 0""" )
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}
    for start_chain_element in range(1, number_limit ):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element )
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element )
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution()}""")
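    # Spot checks: 145 is a fixed point of the digit-factorial map (1! + 4! + 5! = 145),
    # and 169 maps to 363601, the start of the well-known length-3 loop.
    assert digit_factorial_sum(145 ) == 145
    assert digit_factorial_sum(169 ) == 363601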
| 700 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class A__ ( tf.keras.layers.Layer ):
    def __init__( self , vocab_size , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False , **kwargs ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        self.out_projs = []
        self.out_layers = []
def snake_case_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if self.n_clusters > 0:
A_ = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=UpperCamelCase__ , name="""cluster_weight""" )
A_ = self.add_weight(
shape=(self.n_clusters,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name="""cluster_bias""" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
A_ = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_projs_._{i}''' , )
self.out_projs.append(UpperCamelCase__ )
else:
self.out_projs.append(UpperCamelCase__ )
A_ = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._weight''' , )
A_ = self.add_weight(
shape=(self.vocab_size,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
A_ , A_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
A_ = self.d_embed // (self.div_val**i)
A_ = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_projs_._{i}''' )
self.out_projs.append(UpperCamelCase__ )
A_ = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._weight''' , )
A_ = self.add_weight(
shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(UpperCamelCase__ )
@staticmethod
    def _logit( x , W , b , proj=None ):
        '''simple docstring'''
        y = x
        if proj is not None:
            y = tf.einsum("""ibd,ed->ibe""" , y , proj )
        return tf.einsum("""ibd,nd->ibn""" , y , W ) + b
@staticmethod
    def _gather_logprob( logprob , target ):
        '''simple docstring'''
        lp_size = shape_list(logprob )
        r = tf.range(lp_size[0] , dtype=target.dtype )
        idx = tf.stack([r, target] , 1 )
        return tf.gather_nd(logprob , idx )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=True , UpperCamelCase__=False ) -> Optional[int]:
'''simple docstring'''
A_ = 0
if self.n_clusters == 0:
A_ = self._logit(UpperCamelCase__ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
A_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=UpperCamelCase__ , logits=UpperCamelCase__ )
A_ = tf.nn.log_softmax(UpperCamelCase__ , axis=-1 )
else:
A_ = shape_list(UpperCamelCase__ )
A_ = []
A_ = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
A_ , A_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
A_ = (target >= l_idx) & (target < r_idx)
A_ = tf.where(UpperCamelCase__ )
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ ) - l_idx
if self.div_val == 1:
A_ = self.out_layers[0][0][l_idx:r_idx]
A_ = self.out_layers[0][1][l_idx:r_idx]
else:
A_ = self.out_layers[i][0]
A_ = self.out_layers[i][1]
if i == 0:
A_ = tf.concat([cur_W, self.cluster_weight] , 0 )
A_ = tf.concat([cur_b, self.cluster_bias] , 0 )
A_ = self._logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.out_projs[0] )
A_ = tf.nn.log_softmax(UpperCamelCase__ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = self._gather_logprob(UpperCamelCase__ , UpperCamelCase__ )
else:
A_ = self._logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.out_projs[i] )
A_ = tf.nn.log_softmax(UpperCamelCase__ )
A_ = self.cutoffs[0] + i - 1 # No probability for the head cluster
A_ = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(UpperCamelCase__ )
if target is not None:
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = self._gather_logprob(UpperCamelCase__ , UpperCamelCase__ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(UpperCamelCase__ , -cur_logprob , shape_list(UpperCamelCase__ ) )
A_ = tf.concat(UpperCamelCase__ , axis=-1 )
if target is not None:
if return_mean:
A_ = tf.reduce_mean(UpperCamelCase__ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(UpperCamelCase__ )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(UpperCamelCase__ , name=self.name , aggregation="""mean""" if return_mean else """""" )
return out
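# Toy illustration of the cutoff bookkeeping used above: consecutive `cutoff_ends`
# pairs partition the vocabulary into a head shortlist plus tail clusters. Numbers
# here are made up for the sketch, not tied to any real model.
_demo_vocab_size = 10
_demo_cutoffs = [4, 7] + [_demo_vocab_size]
_demo_cutoff_ends = [0] + _demo_cutoffs  # [0, 4, 7, 10]
_demo_buckets = [
    list(range(_demo_cutoff_ends[i], _demo_cutoff_ends[i + 1] ) ) for i in range(len(_demo_cutoff_ends ) - 1 )
]
assert _demo_buckets == [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]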
| 667 | 0 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = """examples/"""
REPLACE_PATTERNS = {
    """examples""": (re.compile(r'''^check_min_version\(\"[^\"]+\"\)\s*$''', re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
    """init""": (re.compile(r'''^__version__\s+=\s+\"([^\"]+)\"\s*$''', re.MULTILINE), """__version__ = \"VERSION\"\n"""),
    """setup""": (re.compile(r'''^(\s*)version\s*=\s*\"[^\"]+\",''', re.MULTILINE), R"""\1version=\"VERSION\","""),
    """doc""": (re.compile(r'''^(\s*)release\s*=\s*\"[^\"]+\"$''', re.MULTILINE), """release = \"VERSION\"\n"""),
}
REPLACE_FILES = {
    """init""": """src/diffusers/__init__.py""",
    """setup""": """setup.py""",
}
README_FILE = """README.md"""
def update_version_in_file( fname, version, pattern ) -> None:
    with open(fname, """r""", encoding="""utf-8""", newline="""\n""" ) as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("""VERSION""", version )
    code = re_pattern.sub(replace, code )
    with open(fname, """w""", encoding="""utf-8""", newline="""\n""" ) as f:
        f.write(code )
def update_version_in_examples( version ) -> None:
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("""research_projects""" )
        if "legacy" in directories:
            directories.remove("""legacy""" )
        for fname in fnames:
            if fname.endswith(""".py""" ):
                update_version_in_file(os.path.join(folder, fname ), version, pattern="""examples""" )
def global_version_update( version, patch=False ) -> None:
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern )
    if not patch:
        update_version_in_examples(version )
def clean_main_ref_in_model_list() -> None:
    _start_prompt = """🤗 Transformers currently provides the following architectures"""
    _end_prompt = """1. Want to contribute a new model?"""
    with open(README_FILE, """r""", encoding="""utf-8""", newline="""\n""" ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith("""1.""" ):
            lines[index] = lines[index].replace(
                """https://huggingface.co/docs/diffusers/main/model_doc""", """https://huggingface.co/docs/diffusers/model_doc""", )
        index += 1
    with open(README_FILE, """w""", encoding="""utf-8""", newline="""\n""" ) as f:
        f.writelines(lines )
def get_version():
    with open(REPLACE_FILES["""init"""], """r""" ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["""init"""][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def pre_release_work( patch=False ) -> None:
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
    else:
        default_version = F'''{default_version.major}.{default_version.minor + 1}.0'''
    # Now let's ask nicely if that's the right one.
    version = input(F'''Which version are you releasing? [{default_version}]''' )
    if len(version ) == 0:
        version = default_version
    print(F'''Updating version to {version}.''' )
    global_version_update(version, patch=patch )
def post_release_work() -> None:
    current_version = get_version()
    dev_version = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F'''Which version are we developing now? [{dev_version}]''' )
    if len(version ) == 0:
        version = dev_version
    print(F'''Updating version to {version}.''' )
    global_version_update(version )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 701 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation( graph, v, visited_forward, visited_backward, cst_fwd, cst_bwd, queue, parent, shortest_distance, ) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf )
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt) )
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij( source, destination, graph_forward, graph_backward ) -> int:
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source) )
    queue_backward.put((0, destination) )
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd )
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd )
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward, cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance, )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward, cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance, )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
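    # Example run over the graphs defined above: E -> G -> F costs 2 + 1 = 3,
    # beating the forward alternative E -> B -> C -> D -> F, which costs 4.
    assert bidirectional_dij("""E""", """F""", graph_fwd, graph_bwd ) == 3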
| 667 | 0 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '''\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'''
_KWARGS_DESCRIPTION = '''\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'''
_CITATION = '''\n'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , reference_urls=[] , )
    def _compute( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ):
        '''simple docstring'''
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , """""" , x ) for x in predictions] )
                references = np.array([re.sub(s , """""" , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("""""" , """""" , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table = string.digits.maketrans("""""" , """""" , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 100}
| 702 |
'''simple docstring'''
import os
SYMBOLS = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
def parse_roman_numerals( numerals: str ) -> int:
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals( num: int ) -> str:
    numerals = """"""
    m_count = num // 10_00
numerals += m_count * "M"
num %= 10_00
    c_count = num // 1_00
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def solution( roman_numerals_filename: str = "/p089_roman.txt" ) -> int:
    savings = 0
    with open(os.path.dirname(__file__ ) + roman_numerals_filename ) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original )
        shorter = generate_roman_numerals(num )
        savings += len(original ) - len(shorter )
    return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
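    # Spot checks for the parser/generator pair above: the padded form XIIIIII (16)
    # shortens to the minimal XVI, saving 4 characters.
    assert parse_roman_numerals("""XIIIIII""" ) == 16
    assert generate_roman_numerals(16 ) == """XVI"""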
| 667 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = '''Create a default config file for Accelerate with only a few flags set.'''
def write_basic_config( mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False ) -> str:
    path = Path(save_location )
    path.parent.mkdir(parents=True, exist_ok=True )
    if path.exists():
        print(
            F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' )
    config = {
        'compute_environment': 'LOCAL_MACHINE',
        'mixed_precision': mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config['num_processes'] = num_gpus
        config['use_cpu'] = False
        if num_gpus > 1:
            config['distributed_type'] = 'MULTI_GPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config['num_processes'] = num_xpus
        config['use_cpu'] = False
        if num_xpus > 1:
            config['distributed_type'] = 'MULTI_XPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config['num_processes'] = num_npus
        config['use_cpu'] = False
        if num_npus > 1:
            config['distributed_type'] = 'MULTI_NPU'
        else:
            config['distributed_type'] = 'NO'
    else:
        num_npus = 0
        config['use_cpu'] = True
        config['num_processes'] = 1
        config['distributed_type'] = 'NO'
    config = ClusterConfig(**config )
    config.to_json_file(path )
    return path
def default_command_parser( parser, parents ):
    parser = parser.add_parser("""default""", parents=parents, help=description, formatter_class=SubcommandHelpFormatter )
    parser.add_argument(
        """--config_file""", default=default_json_config_file, help=(
            """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
            """location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have """
            """such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed """
            """with \'huggingface\'."""
        ), dest="""save_location""", )
    parser.add_argument(
        """--mixed_precision""", choices=["""no""", """fp16""", """bf16"""], type=str, help="""Whether or not to use mixed precision training. """
        """Choose between FP16 and BF16 (bfloat16) training. """
        """BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""", default="""no""", )
    parser.set_defaults(func=default_config_command )
    return parser
def default_config_command( args ) -> None:
    config_file = write_basic_config(args.mixed_precision, args.save_location )
    if config_file:
        print(F'''accelerate configuration saved at {config_file}''' )
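# Hedged usage sketch: write a throwaway config into a temp directory rather than the
# user's real cache location; the file name here is just an assumption for the demo.
if __name__ == "__main__":
    import os
    import tempfile

    tmp_config = os.path.join(tempfile.mkdtemp(), """default_config.yaml""" )
    print(write_basic_config(mixed_precision="""no""", save_location=tmp_config ) )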
| 703 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
| 667 | 0 |
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def UpperCAmelCase__ ( ) -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjancency = defaultdict(list )
    for nodea, nodeb, cost in edges:
        adjancency[nodea].append([nodeb, cost] )
        adjancency[nodeb].append([nodea, cost] )
    result = mst(adjancency )
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2] )
        reverse = tuple(edge[::-1] )
        assert edge in result or reverse in result
| 704 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory( _ ):
    return EnvironmentCommand()
def download_command_factory( args ):
    return EnvironmentCommand(args.accelerate_config_file )
class EnvironmentCommand( BaseTransformersCLICommand ):
@staticmethod
    def register_subcommand( parser ):
        '''simple docstring'''
        download_parser = parser.add_parser("""env""" )
        download_parser.set_defaults(func=info_command_factory )
        download_parser.add_argument(
            """--accelerate-config_file""" , default=None , help="""The accelerate config file to use for the default values in the launching script.""" , )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self , accelerate_config_file , *args ) -> None:
        '''simple docstring'''
        self._accelerate_config_file = accelerate_config_file
    def run( self ):
'''simple docstring'''
A_ = """not installed"""
if is_safetensors_available():
import safetensors
A_ = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
A_ = f'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
A_ = """not installed"""
A_ = A_ = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
A_ = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(UpperCamelCase__ ):
A_ = load_config_from_file(self._accelerate_config_file ).to_dict()
A_ = (
"""\n""".join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(UpperCamelCase__ , UpperCamelCase__ )
else f'''\t{accelerate_config}'''
)
A_ = """not installed"""
A_ = """NA"""
if is_torch_available():
import torch
A_ = torch.__version__
A_ = torch.cuda.is_available()
A_ = """not installed"""
A_ = """NA"""
if is_tf_available():
import tensorflow as tf
A_ = tf.__version__
try:
# deprecated in v2.1
A_ = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
A_ = bool(tf.config.list_physical_devices("""GPU""" ) )
A_ = """not installed"""
A_ = """not installed"""
A_ = """not installed"""
A_ = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
A_ = flax.__version__
A_ = jax.__version__
A_ = jaxlib.__version__
A_ = jax.lib.xla_bridge.get_backend().platform
A_ = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": f'''{safetensors_version}''',
"""Accelerate version""": f'''{accelerate_version}''',
"""Accelerate config""": f'''{accelerate_config_str}''',
"""PyTorch version (GPU?)""": f'''{pt_version} ({pt_cuda_available})''',
"""Tensorflow version (GPU?)""": f'''{tf_version} ({tf_cuda_available})''',
"""Flax version (CPU?/GPU?/TPU?)""": f'''{flax_version} ({jax_backend})''',
"""Jax version""": f'''{jax_version}''',
"""JaxLib version""": f'''{jaxlib_version}''',
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(UpperCamelCase__ ) )
return info
@staticmethod
    def format_dict( d ):
'''simple docstring'''
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 667 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
class A__ :
    def __init__( self , keywords ) -> None:
        '''simple docstring'''
        self.adlist = []
        self.adlist.append(
            {"""value""": """""", """next_states""": [], """fail_state""": 0, """output""": []} )
        for keyword in keywords:
            self.add_keyword(keyword )
        self.set_fail_transitions()
    def find_next_state( self , current_state , char ):
        '''simple docstring'''
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None
    def add_keyword( self , keyword ) -> None:
        '''simple docstring'''
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state , character )
            if next_state is None:
                self.adlist.append(
                    {
                        """value""": character,
                        """next_states""": [],
                        """fail_state""": 0,
                        """output""": [],
                    } )
                self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
                current_state = len(self.adlist ) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword )
    def set_fail_transitions( self ) -> None:
        '''simple docstring'''
        q = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node )
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child )
                state = self.adlist[r]["""fail_state"""]
                while (
                    self.find_next_state(state , self.adlist[child]["""value"""] ) is None
                    and state != 0
                ):
                    state = self.adlist[state]["""fail_state"""]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state , self.adlist[child]["""value"""] )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["""output"""]
                    + self.adlist[self.adlist[child]["""fail_state"""]]["""output"""]
                )
    def search_in( self , string ) -> dict:
        '''simple docstring'''
        result = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string ) ):
            while (
                self.find_next_state(current_state , string[i] ) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["""fail_state"""]
            next_state = self.find_next_state(current_state , string[i] )
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key ) + 1 )
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
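    # Example search (keywords and haystack borrowed from the upstream doctest of
    # this algorithm); values are 0-based start indices of each match.
    auto = A__(["""what""", """hat""", """ver""", """er"""] )
    matches = auto.search_in("""whatever, err ... , wherever""" )
    assert matches["""what"""] == [0]
    assert matches["""hat"""] == [1]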
| 705 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( PipelineTesterMixin , unittest.TestCase ):
lowercase = KandinskyVaaPriorPipeline
lowercase = ["prompt"]
lowercase = ["prompt", "negative_prompt"]
lowercase = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
lowercase = False
@property
def snake_case_ ( self ) -> Any:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
return self.time_input_dim
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
return 100
@property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
A_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(UpperCamelCase__ )
@property
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
A_ = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 12,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
A_ = PriorTransformer(**UpperCamelCase__ )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
A_ = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
A_ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
A_ = CLIPVisionModelWithProjection(UpperCamelCase__ )
return model
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = CLIPImageProcessor(
crop_size=224 , do_center_crop=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_resize=UpperCamelCase__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.dummy_prior
A_ = self.dummy_image_encoder
A_ = self.dummy_text_encoder
A_ = self.dummy_tokenizer
A_ = self.dummy_image_processor
A_ = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=UpperCamelCase__ , clip_sample_range=10.0 , )
A_ = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=0 ) -> Optional[int]:
'''simple docstring'''
if str(UpperCamelCase__ ).startswith("""mps""" ):
A_ = torch.manual_seed(UpperCamelCase__ )
else:
A_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
A_ = {
"""prompt""": """horse""",
"""generator""": generator,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = """cpu"""
A_ = self.get_dummy_components()
A_ = self.pipeline_class(**UpperCamelCase__ )
A_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
A_ = output.image_embeds
A_ = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0]
A_ = image[0, -10:]
A_ = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
A_ = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = torch_device == """cpu"""
A_ = True
A_ = False
self._test_inference_batch_single_identical(
test_max_difference=UpperCamelCase__ , relax_max_difference=UpperCamelCase__ , test_mean_pixel_difference=UpperCamelCase__ , )
@skip_mps
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = torch_device == """cpu"""
A_ = False
self._test_attention_slicing_forward_pass(
test_max_difference=UpperCamelCase__ , test_mean_pixel_difference=UpperCamelCase__ , )
| 667 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester( unittest.TestCase ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__=100 , UpperCamelCase__=13 , UpperCamelCase__=30 , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=10 , UpperCamelCase__=0.02 , UpperCamelCase__=3 , ) -> Optional[int]:
'''simple docstring'''
A_ = parent
A_ = vocab_size
A_ = batch_size
A_ = image_size
A_ = patch_size
A_ = num_channels
A_ = is_training
A_ = use_labels
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = type_sequence_label_size
A_ = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A_ = (image_size // patch_size) ** 2
A_ = num_patches + 1
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
A_ = FlaxBeitModel(config=snake_case__ )
A_ = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
A_ = FlaxBeitForMaskedImageModeling(config=snake_case__ )
A_ = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = self.type_sequence_label_size
A_ = FlaxBeitForImageClassification(config=snake_case__ )
A_ = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A_ = 1
A_ = FlaxBeitForImageClassification(snake_case__ )
A_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ = model(snake_case__ )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.prepare_config_and_inputs()
(
(
A_
) , (
A_
) , (
A_
) ,
) = config_and_inputs
A_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class A__ ( FlaxModelTesterMixin , unittest.TestCase ):
lowercase = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = FlaxBeitModelTester(self )
A_ = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(snake_case__ )
A_ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case__ )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
A_ = self._prepare_for_class(snake_case__ , snake_case__ )
A_ = model_class(snake_case__ )
@jax.jit
def model_jitted(UpperCamelCase__ , **UpperCamelCase__ ):
return model(pixel_values=snake_case__ , **snake_case__ )
with self.subTest("""JIT Enabled""" ):
A_ = model_jitted(**snake_case__ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
A_ = model_jitted(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) )
for jitted_output, output in zip(snake_case__ , snake_case__ ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
@slow
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
A_ = model_class_name.from_pretrained("""microsoft/beit-base-patch16-224""" )
A_ = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(snake_case__ )
def UpperCAmelCase__ ( ) -> Dict:
A_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@require_flax
class A__ ( unittest.TestCase ):
@cached_property
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = FlaxBeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" )
A_ = self.default_image_processor
A_ = prepare_img()
A_ = image_processor(images=snake_case__ , return_tensors="""np""" ).pixel_values
# prepare bool_masked_pos
A_ = np.ones((1, 196) , dtype=snake_case__ )
# forward pass
A_ = model(pixel_values=snake_case__ , bool_masked_pos=snake_case__ )
A_ = outputs.logits
# verify the logits
A_ = (1, 196, 8192)
self.assertEqual(logits.shape , snake_case__ )
A_ = np.array(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , snake_case__ , atol=1e-2 ) )
@slow
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" )
A_ = self.default_image_processor
A_ = prepare_img()
A_ = image_processor(images=snake_case__ , return_tensors="""np""" )
# forward pass
A_ = model(**snake_case__ )
A_ = outputs.logits
# verify the logits
A_ = (1, 1000)
self.assertEqual(logits.shape , snake_case__ )
A_ = np.array([-1.2385, -1.0987, -1.0108] )
self.assertTrue(np.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) )
A_ = 281
self.assertEqual(logits.argmax(-1 ).item() , snake_case__ )
@slow
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" )
A_ = self.default_image_processor
A_ = prepare_img()
A_ = image_processor(images=snake_case__ , return_tensors="""np""" )
# forward pass
A_ = model(**snake_case__ )
A_ = outputs.logits
# verify the logits
A_ = (1, 21841)
self.assertEqual(logits.shape , snake_case__ )
A_ = np.array([1.6881, -0.2787, 0.5901] )
self.assertTrue(np.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) )
A_ = 2396
self.assertEqual(logits.argmax(-1 ).item() , snake_case__ )
| 706 |
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( SchedulerCommonTest ):
lowercase = (IPNDMScheduler,)
lowercase = (("num_inference_steps", 50),)
def snake_case_ ( self , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = {"""num_train_timesteps""": 1000}
config.update(**UpperCamelCase__ )
return config
def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config(**UpperCamelCase__ )
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
A_ = scheduler_class.from_pretrained(UpperCamelCase__ )
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
A_ = scheduler_class.from_pretrained(UpperCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(**UpperCamelCase__ )
A_ = scheduler_class(**UpperCamelCase__ )
A_ = 10
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
return sample
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase__ , """set_timesteps""" ):
scheduler.set_timesteps(UpperCamelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , """set_timesteps""" ):
A_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A_ = dummy_past_residuals[:]
A_ = scheduler.timesteps[5]
A_ = scheduler.timesteps[6]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = self.full_loop()
A_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 2540529 ) < 10
| 667 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    '''configuration_rag''': ['''RagConfig'''],
    '''retrieval_rag''': ['''RagRetriever'''],
    '''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_rag'''] = [
        '''RagModel''',
        '''RagPreTrainedModel''',
        '''RagSequenceForGeneration''',
        '''RagTokenForGeneration''',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_rag'''] = [
        '''TFRagModel''',
        '''TFRagPreTrainedModel''',
        '''TFRagSequenceForGeneration''',
        '''TFRagTokenForGeneration''',
    ]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 707 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
    # 1. In HF T5, we have block.{x}.layer.{y}, which corresponds to layer.{x} in
# the original model
A_ = list(s_dict.keys() )
for key in keys:
A_ = r""".*/layers_(\d+)"""
A_ = key
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.sub(r"""layers_(\d+)""", r"""block/\1/layer""", UpperCAmelCase__ )
A_ = r"""(encoder|decoder)\/"""
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.match(UpperCAmelCase__, UpperCAmelCase__ ).groups()
if groups[0] == "encoder":
A_ = re.sub(r"""/mlp/""", r"""/1/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/1/layer_norm/""", UpperCAmelCase__ )
elif groups[0] == "decoder":
A_ = re.sub(r"""/mlp/""", r"""/2/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/2/layer_norm/""", UpperCAmelCase__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
A_ = new_key.replace(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''{key} -> {new_key}''' )
A_ = s_dict.pop(UpperCAmelCase__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
A_ = s_dict[key].shape[0]
A_ = s_dict[key]
for idx in range(UpperCAmelCase__ ):
A_ = expert_weihts[idx]
                print(F'''{key} -> {key.replace("expert/", f"experts/expert_{idx}/" )}''' )
s_dict.pop(UpperCAmelCase__ )
return s_dict
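# Minimal illustration of the renaming performed above (hypothetical standalone
# snippet, not part of the original script) -- a T5X encoder key is rewritten
# into the HF block/layer layout:
#
#   import re
#   key = "encoder/layers_0/mlp/wi/kernel"
#   key = re.sub(r"layers_(\d+)", r"block/\1/layer", key)  # encoder/block/0/layer/mlp/wi/kernel
#   key = re.sub(r"/mlp/", r"/1/mlp/", key)                # encoder/block/0/layer/1/mlp/wi/kernel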
__lowerCamelCase = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
    # Convert a Google-style gin config to the Hugging Face format
import regex as re
with open(UpperCAmelCase__, """r""" ) as f:
A_ = f.read()
A_ = re.findall(r"""(.*) = ([0-9.]*)""", UpperCAmelCase__ )
A_ = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
A_ = float(UpperCAmelCase__ ) if """.""" in value else int(UpperCAmelCase__ )
A_ = re.findall(r"""(.*activations) = \(\'(.*)\',\)""", UpperCAmelCase__ )[0]
A_ = str(activation[1] )
A_ = num_experts
A_ = SwitchTransformersConfig(**UpperCAmelCase__ )
return config
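# Illustrative gin excerpt that the two regexes above can parse (hypothetical
# contents; only keys listed in GIN_TO_CONFIG_MAPPING are kept):
#
#   NUM_ENCODER_LAYERS = 12
#   MLP_DIM = 3072
#   dense.MlpBlock.activations = ('gelu',)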
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__=None, UpperCAmelCase__="./", UpperCAmelCase__=8 ) -> List[str]:
# Initialise PyTorch model
print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
A_ = checkpoints.load_tax_checkpoint(UpperCAmelCase__ )
if gin_file is not None:
A_ = convert_gin_to_config(UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ = SwitchTransformersConfig.from_pretrained(UpperCAmelCase__ )
A_ = SwitchTransformersForConditionalGeneration(UpperCAmelCase__ )
A_ = flax_params["""target"""]
A_ = flatten_dict(UpperCAmelCase__, sep="""/""" )
A_ = rename_keys(UpperCAmelCase__ )
A_ = unflatten_dict(UpperCAmelCase__, sep="""/""" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCamelCase = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 667 | 0 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
__lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(_snake_case )
class A__ ( _snake_case ):
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
requires_backends(self , """vision""" )
self.check_model_type(UpperCamelCase__ )
def __call__( self , UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
return super().__call__(UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return {}, {}, {}
def snake_case_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = load_image(UpperCamelCase__ )
A_ = image.size
A_ = self.image_processor(images=UpperCamelCase__ , return_tensors=self.framework )
return model_inputs
def snake_case_ ( self , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
A_ = self.model(**UpperCamelCase__ )
return model_outputs
def snake_case_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = model_outputs.predicted_depth
A_ = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=UpperCamelCase__ )
A_ = prediction.squeeze().cpu().numpy()
A_ = (output * 255 / np.max(UpperCamelCase__ )).astype("""uint8""" )
A_ = Image.fromarray(UpperCamelCase__ )
A_ = {}
A_ = predicted_depth
A_ = depth
return output_dict
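# Illustrative usage (assumed model id and example image URL):
#
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   out["depth"]            # depth map as a PIL image scaled to 0-255
#   out["predicted_depth"]  # raw depth tensor predicted by the model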
| 708 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
assert (
isinstance(UpperCAmelCase__, UpperCAmelCase__ ) and number_of_steps > 0
    ), F'''number_of_steps needs to be a positive integer, your input {number_of_steps}'''
if number_of_steps == 1:
return 1
A_ , A_ = 1, 1
for _ in range(number_of_steps - 1 ):
A_ , A_ = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Optional[Any]:
# Construct model
if gpta_config_file == "":
A_ = GPTaConfig()
else:
A_ = GPTaConfig.from_json_file(_A )
A_ = GPTaModel(_A )
# Load weights from numpy
load_tf_weights_in_gpta(_A, _A, _A )
# Save pytorch-model
A_ = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
A_ = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(model.state_dict(), _A )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(_A, """w""", encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
__lowerCamelCase = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 709 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
return str(UpperCAmelCase__ ) == str(UpperCAmelCase__ )[::-1]
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
return int(UpperCAmelCase__ ) + int(str(UpperCAmelCase__ )[::-1] )
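# Worked illustration (numbers taken from the Project Euler 55 statement):
# 349 + 943 = 1292; 1292 + 2921 = 4213; 4213 + 3124 = 7337, a palindrome,
# so 349 reaches a palindrome within three iterations and is not a Lychrel number.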
def UpperCAmelCase__ ( UpperCAmelCase__ = 1_00_00 ) -> int:
A_ = []
for num in range(1, UpperCAmelCase__ ):
A_ = 0
A_ = num
while iterations < 50:
A_ = sum_reverse(UpperCAmelCase__ )
iterations += 1
if is_palindrome(UpperCAmelCase__ ):
break
else:
lychrel_nums.append(UpperCAmelCase__ )
return len(UpperCAmelCase__ )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__lowerCamelCase = logging.get_logger(__name__)
@dataclass
class A__ :
lowercase = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
lowercase = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
lowercase = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowercase = field(
default=_A , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = self.task_name.lower()
class A__ ( _A ):
lowercase = "train"
lowercase = "dev"
lowercase = "test"
class A__ ( _A ):
lowercase = 42
lowercase = 42
lowercase = 42
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = Split.train , UpperCamelCase__ = None , ) -> Any:
'''simple docstring'''
warnings.warn(
"""This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , UpperCamelCase__ , )
A_ = args
A_ = glue_processors[args.task_name]()
A_ = glue_output_modes[args.task_name]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
try:
A_ = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
# Load data features from cache or dataset file
A_ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''' , )
A_ = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
A_ , A_ = label_list[2], label_list[1]
A_ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A_ = cached_features_file + """.lock"""
with FileLock(UpperCamelCase__ ):
if os.path.exists(UpperCamelCase__ ) and not args.overwrite_cache:
A_ = time.time()
A_ = torch.load(UpperCamelCase__ )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
else:
logger.info(f'''Creating features from dataset file at {args.data_dir}''' )
if mode == Split.dev:
A_ = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
A_ = self.processor.get_test_examples(args.data_dir )
else:
A_ = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
A_ = examples[:limit_length]
A_ = glue_convert_examples_to_features(
UpperCamelCase__ , UpperCamelCase__ , max_length=args.max_seq_length , label_list=UpperCamelCase__ , output_mode=self.output_mode , )
A_ = time.time()
torch.save(self.features , UpperCamelCase__ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ) -> str:
'''simple docstring'''
return len(self.features )
def __getitem__( self , UpperCamelCase__ ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
return self.label_list
| 710 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[int]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> List[Any]:
# word like '180' or '身高' or '神'
for char in word:
A_ = ord(UpperCAmelCase__ )
if not _is_chinese_char(UpperCAmelCase__ ):
return 0
return 1
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = set()
for token in tokens:
A_ = len(UpperCAmelCase__ ) > 1 and is_chinese(UpperCAmelCase__ )
if chinese_word:
word_set.add(UpperCAmelCase__ )
A_ = list(UpperCAmelCase__ )
return word_list
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
if not chinese_word_set:
return bert_tokens
A_ = max([len(UpperCAmelCase__ ) for w in chinese_word_set] )
A_ = bert_tokens
A_ , A_ = 0, len(UpperCAmelCase__ )
while start < end:
A_ = True
if is_chinese(bert_word[start] ):
A_ = min(end - start, UpperCAmelCase__ )
for i in range(UpperCAmelCase__, 1, -1 ):
A_ = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1, start + i ):
A_ = """##""" + bert_word[j]
A_ = start + i
A_ = False
break
if single_word:
start += 1
return bert_word
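# Worked illustration (hypothetical tokens): given BERT tokens ['我', '喜', '欢']
# and the LTP word set {'喜欢'}, the loop above rewrites the span to
# ['我', '喜', '##欢'], so whole-word masking later treats '喜欢' as one unit.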
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
A_ = []
for i in range(0, len(UpperCAmelCase__ ), 1_00 ):
A_ = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=["""cws"""] ).cws
A_ = [get_chinese_word(UpperCAmelCase__ ) for r in res]
ltp_res.extend(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
A_ = []
for i in range(0, len(UpperCAmelCase__ ), 1_00 ):
A_ = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=UpperCAmelCase__, truncation=UpperCAmelCase__, max_length=5_12 )
bert_res.extend(res["""input_ids"""] )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
A_ = []
for input_ids, chinese_word in zip(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = []
for id in input_ids:
A_ = bert_tokenizer._convert_id_to_token(UpperCAmelCase__ )
input_tokens.append(UpperCAmelCase__ )
A_ = add_sub_symbol(UpperCAmelCase__, UpperCAmelCase__ )
A_ = []
        # We only save the positions of Chinese subwords starting with ##, which means they are part of a whole word.
for i, token in enumerate(UpperCAmelCase__ ):
if token[:2] == "##":
A_ = token[2:]
# save chinese tokens' pos
if len(UpperCAmelCase__ ) == 1 and _is_chinese_char(ord(UpperCAmelCase__ ) ):
ref_id.append(UpperCAmelCase__ )
ref_ids.append(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
return ref_ids
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[Any]:
    # For Chinese (Ro)BERT, the best result is from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name, """r""", encoding="""utf-8""" ) as f:
A_ = f.readlines()
A_ = [line.strip() for line in data if len(UpperCAmelCase__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    A_ = LTP(args.ltp ) # faster on a GPU device
A_ = BertTokenizer.from_pretrained(args.bert )
A_ = prepare_ref(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
with open(args.save_path, """w""", encoding="""utf-8""" ) as f:
A_ = [json.dumps(UpperCAmelCase__ ) + """\n""" for ref in ref_ids]
f.writelines(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
    help='''file to process, same format as the LM training data''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
__lowerCamelCase = parser.parse_args()
main(args)
| 667 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class A__ ( unittest.TestCase ):
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = 10
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = [1, 2, 3, 4]
A_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_A , self.block_size , 0 ) , _A )
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
A_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_A , self.block_size , 0 ) , _A )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
A_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_A , self.block_size , 0 ) , _A )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
A_ = process_story(_A )
self.assertEqual(_A , [] )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = ''
A_ = process_story(_A )
self.assertEqual(_A , [] )
self.assertEqual(_A , [] )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
A_ = process_story(_A )
A_ = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(_A , _A )
A_ = ['It was the best of times.']
self.assertEqual(_A , _A )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = torch.tensor([1, 2, 3, 4] )
A_ = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(_A , 0 ).numpy() , expected.numpy() )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
A_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_A , 23 ).numpy() , expected.numpy() )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
A_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_A , 1 ).numpy() , expected.numpy() )
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = 101
A_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
A_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
A_ = compute_token_type_ids(_A , _A )
np.testing.assert_array_equal(_A , _A )
| 711 |
'''simple docstring'''
from __future__ import annotations
import math
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5, int(math.sqrt(UpperCAmelCase__ ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
__lowerCamelCase = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> list[int]:
if not isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
A_ = []
for num in range(len(UpperCAmelCase__ ) ):
A_ = 0
while 2 * i * i <= odd_composites[num]:
A_ = odd_composites[num] - 2 * i * i
if is_prime(UpperCAmelCase__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(UpperCAmelCase__ ) == n:
return list_nums
return []
def UpperCAmelCase__ ( ) -> int:
return compute_nums(1 )[0]
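# Worked check (illustrative): 33 = 31 + 2 * 1^2 satisfies Goldbach's other
# conjecture, while 5777 is the first odd composite that admits no such
# decomposition -- the value compute_nums(1)[0] eventually returns.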
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
'''simple docstring'''
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Optional[int]:
A_ = k_size // 2
A_ , A_ = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
A_ = 1 / (2 * pi * sigma) * exp(-(square(__snake_case ) + square(__snake_case )) / (2 * square(__snake_case )) )
return g
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Union[str, Any]:
A_ , A_ = image.shape[0], image.shape[1]
# dst image height and width
A_ = height - k_size + 1
A_ = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
A_ = zeros((dst_height * dst_width, k_size * k_size) )
A_ = 0
for i, j in product(range(__snake_case ), range(__snake_case ) ):
A_ = ravel(image[i : i + k_size, j : j + k_size] )
A_ = window
row += 1
# turn the kernel into shape(k*k, 1)
A_ = gen_gaussian_kernel(__snake_case, __snake_case )
A_ = ravel(__snake_case )
# reshape and get the dst image
A_ = dot(__snake_case, __snake_case ).reshape(__snake_case, __snake_case ).astype(__snake_case )
return dst
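# Shape sanity check (illustrative): for a 512x512 grayscale input and k_size=3,
# dst_height = dst_width = 512 - 3 + 1 = 510 -- the im2col filter above applies
# no padding, so the output shrinks by k_size - 1 pixels along each axis.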
if __name__ == "__main__":
# read original image
__lowerCamelCase = imread(r'''../image_data/lena.jpg''')
# turn image in gray scale value
__lowerCamelCase = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
__lowerCamelCase = gaussian_filter(gray, 3, sigma=1)
__lowerCamelCase = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('''gaussian filter with 3x3 mask''', gaussianaxa)
imshow('''gaussian filter with 5x5 mask''', gaussianaxa)
waitKey()
| 712 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ = 0, UpperCAmelCase__ = 0 ) -> int:
A_ = right or len(UpperCAmelCase__ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(UpperCAmelCase__, UpperCAmelCase__, left + 1, right - 1 )
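# Illustrative note: this is a recursive two-pointer linear search -- each call
# inspects both ends of the remaining range and recurses inward, so every index
# is visited once, the list need not be sorted, and the complexity is O(n).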
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
'''simple docstring'''
import string
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[int]:
for key in range(len(string.ascii_uppercase ) ):
A_ = """"""
for symbol in message:
if symbol in string.ascii_uppercase:
A_ = string.ascii_uppercase.find(lowerCamelCase_ )
A_ = num - key
if num < 0:
A_ = num + len(string.ascii_uppercase )
A_ = translated + string.ascii_uppercase[num]
else:
A_ = translated + symbol
print(F'''Decryption using Key #{key}: {translated}''' )
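# Example (illustrative): with key = 3, the ciphertext letter 'D' (index 3) maps
# back to 'A' (index 0); characters outside A-Z pass through unchanged, and all
# 26 candidate keys are printed so the user can pick the readable decryption.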
def UpperCAmelCase__ ( ) -> Tuple:
A_ = input("""Encrypted message: """ )
A_ = message.upper()
decrypt(lowerCamelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 713 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = FileLock(str(tmpdir / """foo.lock""" ) )
A_ = FileLock(str(tmpdir / """foo.lock""" ) )
A_ = 0.01
with locka.acquire():
with pytest.raises(UpperCAmelCase__ ):
A_ = time.time()
locka.acquire(UpperCAmelCase__ )
assert time.time() - _start > timeout
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Dict:
A_ = """a""" * 10_00 + """.lock"""
A_ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(UpperCAmelCase__ )
assert len(os.path.basename(locka._lock_file ) ) <= 2_55
A_ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(UpperCAmelCase__ ):
locka.acquire(0 )
| 667 | 0 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Dict:
for i in range(len(A__ ) - 1, 0, -1 ):
A_ = False
for j in range(A__, 0, -1 ):
if unsorted[j] < unsorted[j - 1]:
A_ , A_ = unsorted[j - 1], unsorted[j]
A_ = True
for j in range(A__ ):
if unsorted[j] > unsorted[j + 1]:
A_ , A_ = unsorted[j + 1], unsorted[j]
A_ = True
if not swapped:
break
return unsorted
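# Illustrative trace (hypothetical input): for [4, 5, 2, 1, 2] the backward pass
# first sinks small values toward the front and the forward pass bubbles large
# values toward the back, so "turtle" elements move faster than in plain bubble
# sort; the swapped flag allows an early exit once a pass makes no exchange.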
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
__lowerCamelCase = [int(item) for item in user_input.split(''',''')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 714 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class A__ ( _snake_case ):
lowercase = 42
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=3 , UpperCamelCase__=("DownEncoderBlock2D",) , UpperCamelCase__=(64,) , UpperCamelCase__=2 , UpperCamelCase__=32 , UpperCamelCase__="silu" , UpperCamelCase__=True , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
A_ = layers_per_block
A_ = torch.nn.Convad(
UpperCamelCase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
A_ = None
A_ = nn.ModuleList([] )
# down
A_ = block_out_channels[0]
for i, down_block_type in enumerate(UpperCamelCase__ ):
A_ = output_channel
A_ = block_out_channels[i]
A_ = i == len(UpperCamelCase__ ) - 1
A_ = get_down_block(
UpperCamelCase__ , num_layers=self.layers_per_block , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
self.down_blocks.append(UpperCamelCase__ )
# mid
A_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# out
A_ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCamelCase__ , eps=1e-6 )
A_ = nn.SiLU()
A_ = 2 * out_channels if double_z else out_channels
A_ = nn.Convad(block_out_channels[-1] , UpperCamelCase__ , 3 , padding=1 )
A_ = False
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = x
A_ = self.conv_in(UpperCamelCase__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ ):
def custom_forward(*UpperCamelCase__ ):
return module(*UpperCamelCase__ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
for down_block in self.down_blocks:
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ )
# middle
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCamelCase__ )
else:
# down
for down_block in self.down_blocks:
A_ = down_block(UpperCamelCase__ )
# middle
A_ = self.mid_block(UpperCamelCase__ )
# post-process
A_ = self.conv_norm_out(UpperCamelCase__ )
A_ = self.conv_act(UpperCamelCase__ )
A_ = self.conv_out(UpperCamelCase__ )
return sample
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=3 , UpperCamelCase__=("UpDecoderBlock2D",) , UpperCamelCase__=(64,) , UpperCamelCase__=2 , UpperCamelCase__=32 , UpperCamelCase__="silu" , UpperCamelCase__="group" , ) -> List[Any]:
'''simple docstring'''
super().__init__()
A_ = layers_per_block
A_ = nn.Convad(
UpperCamelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
A_ = None
A_ = nn.ModuleList([] )
A_ = in_channels if norm_type == """spatial""" else None
# mid
A_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# up
A_ = list(reversed(UpperCamelCase__ ) )
A_ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCamelCase__ ):
A_ = output_channel
A_ = reversed_block_out_channels[i]
A_ = i == len(UpperCamelCase__ ) - 1
A_ = get_up_block(
UpperCamelCase__ , num_layers=self.layers_per_block + 1 , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , prev_output_channel=UpperCamelCase__ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , resnet_time_scale_shift=UpperCamelCase__ , )
self.up_blocks.append(UpperCamelCase__ )
A_ = output_channel
# out
if norm_type == "spatial":
A_ = SpatialNorm(block_out_channels[0] , UpperCamelCase__ )
else:
A_ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCamelCase__ , eps=1e-6 )
A_ = nn.SiLU()
A_ = nn.Convad(block_out_channels[0] , UpperCamelCase__ , 3 , padding=1 )
A_ = False
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=None ) -> Optional[Any]:
'''simple docstring'''
A_ = z
A_ = self.conv_in(UpperCamelCase__ )
A_ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ ):
def custom_forward(*UpperCamelCase__ ):
return module(*UpperCamelCase__ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
else:
# middle
A_ = self.mid_block(UpperCamelCase__ , UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = up_block(UpperCamelCase__ , UpperCamelCase__ )
# post-process
if latent_embeds is None:
A_ = self.conv_norm_out(UpperCamelCase__ )
else:
A_ = self.conv_norm_out(UpperCamelCase__ , UpperCamelCase__ )
A_ = self.conv_act(UpperCamelCase__ )
A_ = self.conv_out(UpperCamelCase__ )
return sample
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__="random" , UpperCamelCase__=False , UpperCamelCase__=True ) -> str:
'''simple docstring'''
super().__init__()
A_ = n_e
A_ = vq_embed_dim
A_ = beta
A_ = legacy
A_ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
A_ = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
A_ = self.used.shape[0]
A_ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A_ = self.re_embed
A_ = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
A_ = n_e
A_ = sane_index_shape
def snake_case_ ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = inds.shape
assert len(UpperCamelCase__ ) > 1
A_ = inds.reshape(ishape[0] , -1 )
A_ = self.used.to(UpperCamelCase__ )
A_ = (inds[:, :, None] == used[None, None, ...]).long()
A_ = match.argmax(-1 )
A_ = match.sum(2 ) < 1
if self.unknown_index == "random":
A_ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
A_ = self.unknown_index
return new.reshape(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
A_ = inds.shape
assert len(UpperCamelCase__ ) > 1
A_ = inds.reshape(ishape[0] , -1 )
A_ = self.used.to(UpperCamelCase__ )
if self.re_embed > self.used.shape[0]: # extra token
A_ = 0 # simply set to zero
A_ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCamelCase__ )
return back.reshape(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
# reshape z -> (batch, height, width, channel) and flatten
A_ = z.permute(0 , 2 , 3 , 1 ).contiguous()
A_ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A_ = torch.argmin(torch.cdist(UpperCamelCase__ , self.embedding.weight ) , dim=1 )
A_ = self.embedding(UpperCamelCase__ ).view(z.shape )
A_ = None
A_ = None
# compute loss for embedding
if not self.legacy:
A_ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A_ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
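        # (straight-through estimator: the forward value equals z_q, while the
        # backward pass copies gradients from z_q directly onto the encoder output z)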
A_ = z + (z_q - z).detach()
# reshape back to match original input shape
A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
A_ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
A_ = self.remap_to_used(UpperCamelCase__ )
A_ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
A_ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A_ = indices.reshape(shape[0] , -1 ) # add batch axis
A_ = self.unmap_to_all(UpperCamelCase__ )
A_ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A_ = self.embedding(UpperCamelCase__ )
if shape is not None:
A_ = z_q.view(UpperCamelCase__ )
# reshape back to match original input shape
A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class A__ ( _snake_case ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__=False ) -> Dict:
'''simple docstring'''
A_ = parameters
A_ , A_ = torch.chunk(UpperCamelCase__ , 2 , dim=1 )
A_ = torch.clamp(self.logvar , -30.0 , 20.0 )
A_ = deterministic
A_ = torch.exp(0.5 * self.logvar )
A_ = torch.exp(self.logvar )
if self.deterministic:
A_ = A_ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def snake_case_ ( self , UpperCamelCase__ = None ) -> torch.FloatTensor:
'''simple docstring'''
# make sure sample is on the same device as the parameters and has same dtype
A_ = randn_tensor(
self.mean.shape , generator=UpperCamelCase__ , device=self.parameters.device , dtype=self.parameters.dtype )
A_ = self.mean + self.std * sample
return x
def snake_case_ ( self , UpperCamelCase__=None ) -> int:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
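                # closed-form KL divergence against a standard normal:
                # KL(N(mu, sigma^2) || N(0, 1)) = 1/2 * sum(mu^2 + sigma^2 - 1 - log(sigma^2))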
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=[1, 2, 3] ) -> Optional[Any]:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
A_ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCamelCase__ )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
return self.mean
| 667 | 0 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ = 10 ) -> Tuple:
if not isinstance(lowerCamelCase__, lowerCamelCase__ ) or n < 0:
raise ValueError("""Invalid input""" )
A_ = 10**n
A_ = 2_84_33 * (pow(2, 7_83_04_57, lowerCamelCase__ )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(10) = }""")
| 715 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
# Load configuration defined in the metadata file
with open(UpperCAmelCase__ ) as metadata_file:
A_ = json.load(UpperCAmelCase__ )
A_ = LukeConfig(use_entity_aware_attention=UpperCAmelCase__, **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
A_ = torch.load(UpperCAmelCase__, map_location="""cpu""" )["""module"""]
# Load the entity vocab file
A_ = load_original_entity_vocab(UpperCAmelCase__ )
# add an entry for [MASK2]
A_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
A_ = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
A_ = AddedToken("""<ent>""", lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ )
A_ = AddedToken("""<ent2>""", lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__, """tokenizer_config.json""" ), """r""" ) as f:
A_ = json.load(UpperCAmelCase__ )
A_ = """MLukeTokenizer"""
with open(os.path.join(UpperCAmelCase__, """tokenizer_config.json""" ), """w""" ) as f:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__, MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ), """w""" ) as f:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__ )
# Initialize the embeddings of the special tokens
A_ = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
A_ = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
A_ = state_dict["""embeddings.word_embeddings.weight"""]
A_ = word_emb[ent_init_index].unsqueeze(0 )
A_ = word_emb[enta_init_index].unsqueeze(0 )
A_ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
A_ = state_dict[bias_name]
A_ = decoder_bias[ent_init_index].unsqueeze(0 )
A_ = decoder_bias[enta_init_index].unsqueeze(0 )
A_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
A_ = F'''encoder.layer.{layer_index}.attention.self.'''
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
A_ = state_dict["""entity_embeddings.entity_embeddings.weight"""]
A_ = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
A_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
A_ = state_dict["""entity_predictions.bias"""]
A_ = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
A_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
A_ = LukeForMaskedLM(config=UpperCAmelCase__ ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
A_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
A_ = state_dict[key]
else:
A_ = state_dict[key]
A_ , A_ = model.load_state_dict(UpperCAmelCase__, strict=UpperCAmelCase__ )
if set(UpperCAmelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(UpperCAmelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__, task="""entity_classification""" )
A_ = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
A_ = (0, 9)
A_ = tokenizer(UpperCAmelCase__, entity_spans=[span], return_tensors="""pt""" )
A_ = model(**UpperCAmelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
A_ = torch.Size((1, 33, 7_68) )
A_ = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCAmelCase__, atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
A_ = torch.Size((1, 1, 7_68) )
A_ = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], UpperCAmelCase__, atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__ )
A_ = """Tokyo is the capital of <mask>."""
A_ = (24, 30)
A_ = tokenizer(UpperCAmelCase__, entity_spans=[span], return_tensors="""pt""" )
A_ = model(**UpperCAmelCase__ )
A_ = encoding["""input_ids"""][0].tolist()
A_ = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
A_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(UpperCAmelCase__ )
A_ = outputs.entity_logits[0][0].argmax().item()
A_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(UpperCAmelCase__ ) )
model.save_pretrained(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = ["""[MASK]""", """[PAD]""", """[UNK]"""]
A_ = [json.loads(UpperCAmelCase__ ) for line in open(UpperCAmelCase__ )]
A_ = {}
for entry in data:
A_ = entry["""id"""]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
A_ = entity_id
break
A_ = F'''{language}:{entity_name}'''
A_ = entity_id
return new_mapping
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
__lowerCamelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 667 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class A__ ( _snake_case ):
lowercase = "vit_msn"
def __init__( self , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__="gelu" , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-0_6 , UpperCamelCase__=224 , UpperCamelCase__=16 , UpperCamelCase__=3 , UpperCamelCase__=True , **UpperCamelCase__ , ) -> str:
'''simple docstring'''
super().__init__(**lowercase_ )
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = initializer_range
A_ = layer_norm_eps
A_ = image_size
A_ = patch_size
A_ = num_channels
A_ = qkv_bias
| 716 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A__ ( _snake_case ):
lowercase = "ClapFeatureExtractor"
lowercase = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = kwargs.pop("""sampling_rate""" , UpperCamelCase__ )
if text is None and audios is None:
raise ValueError("""You have to specify either text or audios. Both cannot be none.""" )
if text is not None:
A_ = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if audios is not None:
A_ = self.feature_extractor(
UpperCamelCase__ , sampling_rate=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if text is not None and audios is not None:
A_ = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = self.tokenizer.model_input_names
A_ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
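# Illustrative usage (assumed checkpoint id; CLAP audio models expect 48 kHz input):
#
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=waveform,
#                      sampling_rate=48000, return_tensors="pt")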
| 667 | 0 |
'''simple docstring'''
import heapq
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> set[int]:
A_ = []
    # for each node and its adjacency list, add them and the node's rank to the queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(snake_case__, [-1 * len(snake_case__ ), (key, value)] )
# chosen_vertices = set of chosen vertices
A_ = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
A_ = heapq.heappop(snake_case__ )[1][0]
chosen_vertices.add(snake_case__ )
# Remove all arcs adjacent to argmax
for elem in queue:
        # if the vertex has no adjacent nodes, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
A_ = elem[1][1].index(snake_case__ )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(snake_case__ )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCamelCase = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 717 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__lowerCamelCase = imread(r'''digital_image_processing/image_data/lena_small.jpg''')
__lowerCamelCase = cvtColor(img, COLOR_BGR2GRAY)
def UpperCAmelCase__ ( ) -> Dict:
A_ = cn.convert_to_negative(UpperCAmelCase__ )
# assert negative_img array for at least one True
assert negative_img.any()
def UpperCAmelCase__ ( ) -> List[Any]:
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(UpperCAmelCase__, 1_10 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def UpperCAmelCase__ ( ) -> str:
A_ = canny.gen_gaussian_kernel(9, sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def UpperCAmelCase__ ( ) -> Union[str, Any]:
A_ = imread("""digital_image_processing/image_data/lena_small.jpg""", 0 )
# assert ambiguous array for all == True
assert canny_img.all()
A_ = canny.canny(UpperCAmelCase__ )
# assert canny array for at least one True
assert canny_array.any()
def UpperCAmelCase__ ( ) -> Dict:
assert gg.gaussian_filter(UpperCAmelCase__, 5, sigma=0.9 ).all()
def UpperCAmelCase__ ( ) -> int:
# laplace diagonals
A_ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
A_ = conv.img_convolve(UpperCAmelCase__, UpperCAmelCase__ ).astype(UpperCAmelCase__ )
assert res.any()
def UpperCAmelCase__ ( ) -> List[Any]:
assert med.median_filter(UpperCAmelCase__, 3 ).any()
def UpperCAmelCase__ ( ) -> List[Any]:
A_ , A_ = sob.sobel_filter(UpperCAmelCase__ )
assert grad.any() and theta.any()
def UpperCAmelCase__ ( ) -> List[str]:
A_ = sp.make_sepia(UpperCAmelCase__, 20 )
assert sepia.all()
def UpperCAmelCase__ ( UpperCAmelCase__ = "digital_image_processing/image_data/lena_small.jpg" ) -> List[Any]:
A_ = bs.Burkes(imread(UpperCAmelCase__, 1 ), 1_20 )
burkes.process()
assert burkes.output_img.any()
def UpperCAmelCase__ ( UpperCAmelCase__ = "digital_image_processing/image_data/lena_small.jpg", ) -> Optional[int]:
A_ = rs.NearestNeighbour(imread(UpperCAmelCase__, 1 ), 4_00, 2_00 )
nn.process()
assert nn.output.any()
def UpperCAmelCase__ ( ) -> Optional[int]:
A_ = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
A_ = imread(UpperCAmelCase__, 0 )
# Test for get_neighbors_pixel function() return not None
A_ = 0
A_ = 0
A_ = image[x_coordinate][y_coordinate]
A_ = lbp.get_neighbors_pixel(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
A_ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0, image.shape[0] ):
for j in range(0, image.shape[1] ):
A_ = lbp.local_binary_value(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
assert lbp_image.any()
| 667 | 0 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
A_ = 1
A_ = 1
while repunit:
A_ = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
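# Worked example (from the Project Euler 129 statement): A(7) = 6, since
# R(6) = 111111 = 7 * 15873 is the first repunit divisible by 7.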
def UpperCAmelCase__ ( UpperCAmelCase__ = 1_00_00_00 ) -> int:
A_ = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(UpperCAmelCase__ ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(f"""{solution() = }""")
| 718 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> float:
_validate_point(UpperCAmelCase__ )
_validate_point(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(a - b ) for a, b in zip(UpperCAmelCase__, UpperCAmelCase__ ) ) )
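# Example (illustrative): the Manhattan distance between (1, 1) and (3, 3) is
# |1 - 3| + |1 - 3| = 4.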
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> None:
if point:
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
for item in point:
if not isinstance(UpperCAmelCase__, (int, float) ):
A_ = (
"""Expected a list of numbers as input, found """
F'''{type(UpperCAmelCase__ ).__name__}'''
)
raise TypeError(UpperCAmelCase__ )
else:
A_ = F'''Expected a list of numbers as input, found {type(UpperCAmelCase__ ).__name__}'''
raise TypeError(UpperCAmelCase__ )
else:
raise ValueError("""Missing an input""" )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> float:
_validate_point(UpperCAmelCase__ )
_validate_point(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(x - y ) for x, y in zip(UpperCAmelCase__, UpperCAmelCase__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
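# Hedged usage sketch: both helpers above compute the L1 (Manhattan) metric,
# sum(|x_i - y_i|). The function names were mangled in this sample, so the
# identity is restated inline; the sample points are illustrative only.
_p, _q = [1.0, 1.0], [2.0, 2.0]
assert sum(abs(a - b) for a, b in zip(_p, _q)) == 2.0  # |1 - 2| + |1 - 2|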
| 667 | 0 |
'''simple docstring'''
import os
def UpperCAmelCase__ ( UpperCAmelCase__ = "input.txt" ) -> int:
with open(os.path.join(os.path.dirname(UpperCAmelCase__ ), UpperCAmelCase__ ) ) as input_file:
A_ = [
[int(UpperCAmelCase__ ) for element in line.split(""",""" )]
for line in input_file.readlines()
]
A_ = len(UpperCAmelCase__ )
A_ = len(matrix[0] )
A_ = [[-1 for _ in range(UpperCAmelCase__ )] for _ in range(UpperCAmelCase__ )]
for i in range(UpperCAmelCase__ ):
A_ = matrix[i][0]
for j in range(1, UpperCAmelCase__ ):
for i in range(UpperCAmelCase__ ):
A_ = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1, UpperCAmelCase__ ):
A_ = min(
minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2, -1, -1 ):
A_ = min(
minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 719 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class A__ ( _snake_case ):
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> None:
'''simple docstring'''
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , UpperCamelCase__ , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
| 667 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( __lowerCamelCase ):
lowercase = (IPNDMScheduler,)
lowercase = (("num_inference_steps", 50),)
def snake_case_ ( self , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = {"""num_train_timesteps""": 1000}
config.update(**SCREAMING_SNAKE_CASE_ )
return config
def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , SCREAMING_SNAKE_CASE_ )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config(**SCREAMING_SNAKE_CASE_ )
A_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE_ )
A_ = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE_ )
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
A_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
A_ = new_scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
A_ = new_scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
pass
def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , SCREAMING_SNAKE_CASE_ )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residuals (must be after setting timesteps)
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE_ )
A_ = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residual (must be after setting timesteps)
A_ = dummy_past_residuals[:]
A_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
A_ = new_scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
A_ = new_scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(**SCREAMING_SNAKE_CASE_ )
A_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
A_ = 10
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
for i, t in enumerate(scheduler.timesteps ):
A_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
A_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
return sample
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , SCREAMING_SNAKE_CASE_ )
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
A_ = self.dummy_sample
A_ = 0.1 * sample
if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE_ , """set_timesteps""" ):
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE_ , """set_timesteps""" ):
A_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A_ = dummy_past_residuals[:]
A_ = scheduler.timesteps[5]
A_ = scheduler.timesteps[6]
A_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
A_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
A_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case_ ( self ) -> str:
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ , time_step=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE_ , time_step=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = self.full_loop()
A_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_mean.item() - 2540529 ) < 10
| 720 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
if num < 0:
return False
A_ = num
A_ = 0
while num > 0:
A_ = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
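# Hedged sanity checks for the reversal-based test above (restated inline since
# the function name was mangled in this sample); the cases are illustrative.
for _value, _expected in [(0, True), (121, True), (-121, False), (123, False)]:
    _n, _rev = _value, 0
    while _n > 0:
        _rev = _rev * 10 + _n % 10
        _n //= 10
    assert (_value >= 0 and _value == _rev) == _expected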
| 667 | 0 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ = 10_00 ) -> List[str]:
A_ = 3
A_ = 0
while a < n:
        if a % 3 == 0 or a % 5 == 0:
            # multiples of 15 already satisfy a % 3 == 0, so the original
            # `elif a % 15 == 0` correction branch was unreachable and is dropped
            result += a
a += 1
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 721 |
'''simple docstring'''
__lowerCamelCase = range(2, 20 + 1)
__lowerCamelCase = [10**k for k in range(ks[-1] + 1)]
__lowerCamelCase = {}
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Tuple:
A_ = sum(a_i[j] for j in range(UpperCAmelCase__, len(UpperCAmelCase__ ) ) )
A_ = sum(a_i[j] * base[j] for j in range(min(len(UpperCAmelCase__ ), UpperCAmelCase__ ) ) )
A_ , A_ = 0, 0
A_ = n - i
A_ = memo.get(UpperCAmelCase__ )
if sub_memo is not None:
A_ = sub_memo.get(UpperCAmelCase__ )
if jumps is not None and len(UpperCAmelCase__ ) > 0:
# find and make the largest jump without going over
A_ = -1
for _k in range(len(UpperCAmelCase__ ) - 1, -1, -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
A_ = _k
break
if max_jump >= 0:
A_ , A_ , A_ = jumps[max_jump]
# since the difference between jumps is cached, add c
A_ = diff + c
for j in range(min(UpperCAmelCase__, len(UpperCAmelCase__ ) ) ):
A_ , A_ = divmod(UpperCAmelCase__, 10 )
if new_c > 0:
add(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ = []
else:
A_ = {c: []}
A_ = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
A_ , A_ = next_term(UpperCAmelCase__, k - 1, i + dn, UpperCAmelCase__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
A_ , A_ = compute(UpperCAmelCase__, UpperCAmelCase__, i + dn, UpperCAmelCase__ )
diff += _diff
dn += terms_jumped
A_ = sub_memo[c]
# keep jumps sorted by # of terms skipped
A_ = 0
while j < len(UpperCAmelCase__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(UpperCAmelCase__, (diff, dn, k) )
return (diff, dn)
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> int:
if i >= n:
return 0, i
if k > len(UpperCAmelCase__ ):
a_i.extend([0 for _ in range(k - len(UpperCAmelCase__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
A_ = i
A_ , A_ , A_ = 0, 0, 0
for j in range(len(UpperCAmelCase__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
A_ = ds_c + ds_b
diff += addend
A_ = 0
for j in range(UpperCAmelCase__ ):
A_ = a_i[j] + addend
A_ , A_ = divmod(UpperCAmelCase__, 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
return diff, i - start_i
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> str:
for j in range(UpperCAmelCase__, len(UpperCAmelCase__ ) ):
A_ = digits[j] + addend
if s >= 10:
A_ , A_ = divmod(UpperCAmelCase__, 10 )
A_ = addend // 10 + quotient
else:
A_ = s
A_ = addend // 10
if addend == 0:
break
while addend > 0:
A_ , A_ = divmod(UpperCAmelCase__, 10 )
digits.append(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ = 10**15 ) -> int:
A_ = [1]
A_ = 1
A_ = 0
while True:
A_ , A_ = next_term(UpperCAmelCase__, 20, i + dn, UpperCAmelCase__ )
dn += terms_jumped
if dn == n - i:
break
A_ = 0
for j in range(len(UpperCAmelCase__ ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
__lowerCamelCase = TypeVar('''T''')
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
return (position - 1) // 2
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
return (2 * position) + 1
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
return (2 * position) + 2
class A__ ( Generic[T] ):
def __init__( self ) -> str:
'''simple docstring'''
A_ = []
A_ = {}
A_ = 0
def __len__( self ) -> int:
'''simple docstring'''
return self.elements
def __repr__( self ) -> List[Any]:
'''simple docstring'''
return str(self.heap )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
return self.elements == 0
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Any:
'''simple docstring'''
self.heap.append((elem, weight) )
A_ = self.elements
self.elements += 1
self._bubble_up(UpperCAmelCase__ )
def snake_case_ ( self ) -> int:
'''simple docstring'''
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
A_ = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
A_ = self.heap[0]
self._bubble_down(UpperCAmelCase__ )
return elem
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
A_ = self.position_map[elem]
A_ = (elem, weight)
if position > 0:
A_ = get_parent_position(UpperCAmelCase__ )
A_ = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(UpperCAmelCase__ )
else:
self._bubble_down(UpperCAmelCase__ )
else:
self._bubble_down(UpperCAmelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
A_ = self.position_map[elem]
if curr_pos == 0:
return None
A_ = get_parent_position(UpperCAmelCase__ )
A_ = self.heap[curr_pos]
A_ = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(UpperCAmelCase__ , UpperCAmelCase__ )
return self._bubble_up(UpperCAmelCase__ )
return None
def snake_case_ ( self , UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = self.position_map[elem]
A_ = self.heap[curr_pos]
A_ = get_child_left_position(UpperCAmelCase__ )
A_ = get_child_right_position(UpperCAmelCase__ )
if child_left_position < self.elements and child_right_position < self.elements:
A_ = self.heap[child_left_position]
A_ = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(UpperCAmelCase__ , UpperCAmelCase__ )
return self._bubble_down(UpperCAmelCase__ )
if child_left_position < self.elements:
A_ = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(UpperCAmelCase__ , UpperCAmelCase__ )
return self._bubble_down(UpperCAmelCase__ )
else:
return None
if child_right_position < self.elements:
A_ = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(UpperCAmelCase__ , UpperCAmelCase__ )
return self._bubble_down(UpperCAmelCase__ )
return None
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Any:
'''simple docstring'''
A_ = self.heap[nodea_pos][0]
A_ = self.heap[nodea_pos][0]
A_ = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
A_ = nodea_pos
A_ = nodea_pos
class A__ ( Generic[T] ):
def __init__( self ) -> List[str]:
'''simple docstring'''
A_ = {}
A_ = 0
def __repr__( self ) -> int:
'''simple docstring'''
return str(self.connections )
def __len__( self ) -> List[str]:
'''simple docstring'''
return self.nodes
def snake_case_ ( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
        # Add a node to the graph if it is not already present
if node not in self.connections:
A_ = {}
self.nodes += 1
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
self.add_node(UpperCAmelCase__ )
self.add_node(UpperCAmelCase__ )
A_ = weight
A_ = weight
def UpperCAmelCase__ ( UpperCAmelCase__, ) -> tuple[dict[T, int], dict[T, T | None]]:
A_ = {node: maxsize for node in graph.connections}
A_ = {node: None for node in graph.connections}
A_ = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(UpperCAmelCase__, UpperCAmelCase__ )
if priority_queue.is_empty():
return dist, parent
# initialization
A_ = priority_queue.extract_min()
A_ = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
A_ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(UpperCAmelCase__, dist[neighbour] )
A_ = node
    # running Prim's algorithm
while not priority_queue.is_empty():
A_ = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
A_ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(UpperCAmelCase__, dist[neighbour] )
A_ = node
return dist, parent
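# Hedged compact cross-check: Prim's algorithm with the standard-library heap in
# place of the indexed priority queue above (that queue additionally supports
# decrease-key, which heapq lacks, hence the lazy "skip if seen" pops here).
# `adj` maps node -> {neighbour: weight}; all names below are assumptions.
import heapq
def _prim_mst_weight(adj: dict, start) -> int:
    seen, total = {start}, 0
    frontier = [(w, v) for v, w in adj[start].items()]
    heapq.heapify(frontier)
    while frontier and len(seen) < len(adj):
        weight, node = heapq.heappop(frontier)
        if node in seen:
            continue
        seen.add(node)
        total += weight
        for nxt, w in adj[node].items():
            if nxt not in seen:
                heapq.heappush(frontier, (w, nxt))
    return total
_adj = {"a": {"b": 1, "c": 4}, "b": {"a": 1, "c": 2}, "c": {"a": 4, "b": 2}}
assert _prim_mst_weight(_adj, "a") == 3  # MST edges: a-b (1) and b-c (2)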
| 700 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class A__ ( tf.keras.layers.Layer ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=1 , UpperCamelCase__=False , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
A_ = vocab_size
A_ = d_embed
A_ = d_proj
A_ = cutoffs + [vocab_size]
A_ = [0] + self.cutoffs
A_ = div_val
A_ = self.cutoffs[0]
A_ = len(self.cutoffs ) - 1
A_ = self.shortlist_size + self.n_clusters
A_ = keep_order
A_ = []
A_ = []
def snake_case_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if self.n_clusters > 0:
A_ = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=UpperCamelCase__ , name="""cluster_weight""" )
A_ = self.add_weight(
shape=(self.n_clusters,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name="""cluster_bias""" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
A_ = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_projs_._{i}''' , )
self.out_projs.append(UpperCamelCase__ )
else:
self.out_projs.append(UpperCamelCase__ )
A_ = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._weight''' , )
A_ = self.add_weight(
shape=(self.vocab_size,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
A_ , A_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
A_ = self.d_embed // (self.div_val**i)
A_ = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_projs_._{i}''' )
self.out_projs.append(UpperCamelCase__ )
A_ = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._weight''' , )
A_ = self.add_weight(
shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(UpperCamelCase__ )
@staticmethod
def snake_case_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ) -> List[Any]:
'''simple docstring'''
A_ = x
if proj is not None:
A_ = tf.einsum("""ibd,ed->ibe""" , UpperCamelCase__ , UpperCamelCase__ )
return tf.einsum("""ibd,nd->ibn""" , UpperCamelCase__ , UpperCamelCase__ ) + b
@staticmethod
def snake_case_ ( UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
A_ = shape_list(UpperCamelCase__ )
A_ = tf.range(lp_size[0] , dtype=target.dtype )
A_ = tf.stack([r, target] , 1 )
return tf.gather_nd(UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=True , UpperCamelCase__=False ) -> Optional[int]:
'''simple docstring'''
A_ = 0
if self.n_clusters == 0:
A_ = self._logit(UpperCamelCase__ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
A_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=UpperCamelCase__ , logits=UpperCamelCase__ )
A_ = tf.nn.log_softmax(UpperCamelCase__ , axis=-1 )
else:
A_ = shape_list(UpperCamelCase__ )
A_ = []
A_ = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
A_ , A_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
A_ = (target >= l_idx) & (target < r_idx)
A_ = tf.where(UpperCamelCase__ )
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ ) - l_idx
if self.div_val == 1:
A_ = self.out_layers[0][0][l_idx:r_idx]
A_ = self.out_layers[0][1][l_idx:r_idx]
else:
A_ = self.out_layers[i][0]
A_ = self.out_layers[i][1]
if i == 0:
A_ = tf.concat([cur_W, self.cluster_weight] , 0 )
A_ = tf.concat([cur_b, self.cluster_bias] , 0 )
A_ = self._logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.out_projs[0] )
A_ = tf.nn.log_softmax(UpperCamelCase__ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = self._gather_logprob(UpperCamelCase__ , UpperCamelCase__ )
else:
A_ = self._logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.out_projs[i] )
A_ = tf.nn.log_softmax(UpperCamelCase__ )
A_ = self.cutoffs[0] + i - 1 # No probability for the head cluster
A_ = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(UpperCamelCase__ )
if target is not None:
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = self._gather_logprob(UpperCamelCase__ , UpperCamelCase__ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(UpperCamelCase__ , -cur_logprob , shape_list(UpperCamelCase__ ) )
A_ = tf.concat(UpperCamelCase__ , axis=-1 )
if target is not None:
if return_mean:
A_ = tf.reduce_mean(UpperCamelCase__ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(UpperCamelCase__ )
# Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
self.add_metric(UpperCamelCase__ , name=self.name , aggregation="""mean""" if return_mean else """""" )
return out
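# Hedged illustration of how the cutoffs above partition the vocabulary: ids
# below the first cutoff stay in the frequent "head" (shortlist); the rest fall
# into progressively larger, rarer tail clusters. Values below are assumptions
# (note that __init__ appends vocab_size as the final cutoff).
def _cluster_of(token_id: int, cutoffs: list) -> int:
    for i, cutoff in enumerate(cutoffs):  # 0 = head, 1..n = tail clusters
        if token_id < cutoff:
            return i
    raise ValueError("token id outside the vocabulary")
_cutoffs = [2000, 10000, 30000]  # i.e. cutoffs=[2000, 10000], vocab_size=30000
assert [_cluster_of(t, _cutoffs) for t in (5, 2500, 29999)] == [0, 1, 2]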
| 667 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = '''▁'''
__lowerCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__lowerCamelCase = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
}
}
__lowerCamelCase = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
__lowerCamelCase = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class A__ ( __UpperCAmelCase ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = ["input_ids", "attention_mask"]
lowercase = []
lowercase = []
def __init__( self , UpperCamelCase__ , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__="</s>" , UpperCamelCase__="<s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<mask>" , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__ = None , UpperCamelCase__=None , **UpperCamelCase__ , ) -> List[str]:
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it
A_ = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token
A_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , tokenizer_file=_lowerCamelCase , src_lang=_lowerCamelCase , tgt_lang=_lowerCamelCase , additional_special_tokens=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
A_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowerCamelCase ) )
A_ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
A_ = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
A_ = 1
A_ = len(self.sp_model )
A_ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_lowerCamelCase )
}
A_ = {v: k for k, v in self.lang_code_to_id.items()}
A_ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
A_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
A_ = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
A_ = src_lang if src_lang is not None else """en_XX"""
A_ = self.lang_code_to_id[self._src_lang]
A_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> List[Any]:
'''simple docstring'''
A_ = self.__dict__.copy()
A_ = None
A_ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
A_ = {}
A_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def snake_case_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> Optional[Any]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
A_ = [1] * len(self.prefix_tokens )
A_ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Optional[Any]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Dict:
'''simple docstring'''
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
A_ = src_lang
A_ = self(_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase )
A_ = self.convert_tokens_to_ids(_lowerCamelCase )
A_ = tgt_lang_id
return inputs
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def snake_case_ ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
A_ = self.sp_model.PieceToId(_lowerCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = """""".join(_lowerCamelCase ).replace(_lowerCamelCase , """ """ ).strip()
return out_string
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Optional[int]:
'''simple docstring'''
if not os.path.isdir(_lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A_ = os.path.join(
_lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , """wb""" ) as fi:
A_ = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = "en_XX" , UpperCamelCase__ = None , UpperCamelCase__ = "ro_RO" , **UpperCamelCase__ , ) -> Dict:
'''simple docstring'''
A_ = src_lang
A_ = tgt_lang
return super().prepare_seqaseq_batch(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def snake_case_ ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
A_ = self.lang_code_to_id[src_lang]
A_ = []
A_ = [self.eos_token_id, self.cur_lang_code]
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = self.lang_code_to_id[lang]
A_ = []
A_ = [self.eos_token_id, self.cur_lang_code]
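# Hedged usage sketch (commented out: it needs network access, the
# sentencepiece package and a real checkpoint; treat it as an illustration of
# the src_lang/tgt_lang workflow, not a tested call):
#
#     from transformers import MBartTokenizer
#     tok = MBartTokenizer.from_pretrained(
#         "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     batch = tok("UN Chief Says There Is No Military Solution in Syria")
#
# Per set_src_lang_special_tokens above, source sequences carry no prefix and
# end with </s> followed by the source language code (here en_XX).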
| 701 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, ) -> float | int:
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
A_ = cst_fwd.get(UpperCAmelCase__, np.inf )
A_ = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
A_ = new_cost_f
A_ = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
A_ = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> int:
A_ = -1
A_ = set()
A_ = set()
A_ = {source: 0}
A_ = {destination: 0}
A_ = {source: None}
A_ = {destination: None}
A_ = PriorityQueue()
A_ = PriorityQueue()
A_ = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
A_ , A_ = queue_forward.get()
visited_forward.add(UpperCAmelCase__ )
A_ , A_ = queue_backward.get()
visited_backward.add(UpperCAmelCase__ )
A_ = pass_and_relaxation(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
A_ = pass_and_relaxation(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
A_ = shortest_distance
return shortest_path_distance
__lowerCamelCase = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
__lowerCamelCase = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class A__ ( lowerCamelCase__ ):
lowercase = "wavlm"
def __init__( self , UpperCamelCase__=32 , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=0.0 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-5 , UpperCamelCase__="group" , UpperCamelCase__="gelu" , UpperCamelCase__=(512, 512, 512, 512, 512, 512, 512) , UpperCamelCase__=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase__=(10, 3, 3, 3, 3, 2, 2) , UpperCamelCase__=False , UpperCamelCase__=128 , UpperCamelCase__=16 , UpperCamelCase__=320 , UpperCamelCase__=800 , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=0.05 , UpperCamelCase__=10 , UpperCamelCase__=2 , UpperCamelCase__=0.0 , UpperCamelCase__=10 , UpperCamelCase__=320 , UpperCamelCase__=2 , UpperCamelCase__=0.1 , UpperCamelCase__=100 , UpperCamelCase__=256 , UpperCamelCase__=256 , UpperCamelCase__=0.1 , UpperCamelCase__="mean" , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=256 , UpperCamelCase__=(512, 512, 512, 512, 1500) , UpperCamelCase__=(5, 3, 3, 1, 1) , UpperCamelCase__=(1, 2, 3, 1, 1) , UpperCamelCase__=512 , UpperCamelCase__=80 , UpperCamelCase__=0 , UpperCamelCase__=1 , UpperCamelCase__=2 , UpperCamelCase__=False , UpperCamelCase__=3 , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=None , **UpperCamelCase__ , ) -> Tuple:
'''simple docstring'''
super().__init__(**__lowerCamelCase , pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase )
A_ = hidden_size
A_ = feat_extract_norm
A_ = feat_extract_activation
A_ = list(__lowerCamelCase )
A_ = list(__lowerCamelCase )
A_ = list(__lowerCamelCase )
A_ = conv_bias
A_ = num_buckets
A_ = max_bucket_distance
A_ = num_conv_pos_embeddings
A_ = num_conv_pos_embedding_groups
A_ = len(self.conv_dim )
A_ = num_hidden_layers
A_ = intermediate_size
A_ = hidden_act
A_ = num_attention_heads
A_ = hidden_dropout
A_ = attention_dropout
A_ = activation_dropout
A_ = feat_proj_dropout
A_ = final_dropout
A_ = layerdrop
A_ = layer_norm_eps
A_ = initializer_range
A_ = num_ctc_classes
A_ = vocab_size
A_ = do_stable_layer_norm
A_ = use_weighted_layer_sum
A_ = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A_ = apply_spec_augment
A_ = mask_time_prob
A_ = mask_time_length
A_ = mask_time_min_masks
A_ = mask_feature_prob
A_ = mask_feature_length
# parameters for pretraining with codevector quantized representations
A_ = num_codevectors_per_group
A_ = num_codevector_groups
A_ = contrastive_logits_temperature
A_ = num_negatives
A_ = codevector_dim
A_ = proj_codevector_dim
A_ = diversity_loss_weight
# ctc loss
A_ = ctc_loss_reduction
A_ = ctc_zero_infinity
# adapter
A_ = add_adapter
A_ = adapter_kernel_size
A_ = adapter_stride
A_ = num_adapter_layers
A_ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
A_ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
A_ = list(__lowerCamelCase )
A_ = list(__lowerCamelCase )
A_ = list(__lowerCamelCase )
A_ = xvector_output_dim
@property
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
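# Hedged numeric illustration of the property above: with the default strides
# (5, 2, 2, 2, 2, 2, 2) the feature extractor downsamples by their product, so
# one output frame covers 320 input samples (20 ms at a 16 kHz input). The
# imports repeat the module-level ones only for self-containedness.
import functools
import operator
assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320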
| 702 |
'''simple docstring'''
import os
__lowerCamelCase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = 0
A_ = 0
while index < len(UpperCAmelCase__ ) - 1:
A_ = SYMBOLS[numerals[index]]
A_ = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> str:
A_ = """"""
A_ = num // 10_00
numerals += m_count * "M"
num %= 10_00
A_ = num // 1_00
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
A_ = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def UpperCAmelCase__ ( UpperCAmelCase__ = "/p089_roman.txt" ) -> int:
A_ = 0
with open(os.path.dirname(UpperCAmelCase__ ) + roman_numerals_filename ) as filea:
A_ = filea.readlines()
for line in lines:
A_ = line.strip()
A_ = parse_roman_numerals(UpperCAmelCase__ )
A_ = generate_roman_numerals(UpperCAmelCase__ )
savings += len(UpperCAmelCase__ ) - len(UpperCAmelCase__ )
return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
import os
def UpperCAmelCase__ ( ) -> Optional[Any]:
A_ = os.path.dirname(os.path.realpath(UpperCamelCase__ ) )
A_ = os.path.join(UpperCamelCase__, """triangle.txt""" )
with open(UpperCamelCase__ ) as f:
A_ = f.readlines()
A_ = []
for line in triangle:
A_ = []
for number in line.strip().split(""" """ ):
numbers_from_line.append(int(UpperCamelCase__ ) )
a.append(UpperCamelCase__ )
for i in range(1, len(UpperCamelCase__ ) ):
for j in range(len(a[i] ) ):
A_ = a[i - 1][j] if j != len(a[i - 1] ) else 0
A_ = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(UpperCamelCase__, UpperCamelCase__ )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
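# Hedged worked example of the bottom-up pass above on the classic 4-row sample
# triangle; the expected maximum is 23 along 3 -> 7 -> 4 -> 9. Names assumed.
_tri = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
for _i in range(1, len(_tri)):
    for _j in range(len(_tri[_i])):
        _up = _tri[_i - 1][_j] if _j != len(_tri[_i - 1]) else 0
        _left = _tri[_i - 1][_j - 1] if _j > 0 else 0
        _tri[_i][_j] += max(_up, _left)
assert max(_tri[-1]) == 23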
| 703 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
| 667 | 0 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ = 60_08_51_47_51_43 ) -> Optional[int]:
try:
A_ = int(__SCREAMING_SNAKE_CASE )
except (TypeError, ValueError):
raise TypeError("""Parameter n must be int or castable to int.""" )
if n <= 0:
raise ValueError("""Parameter n must be greater than or equal to one.""" )
A_ = 2
A_ = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
A_ = i
while n % i == 0:
A_ = n // i
i += 1
return int(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 704 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
return EnvironmentCommand()
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
return EnvironmentCommand(args.accelerate_config_file )
class A__ ( _snake_case ):
@staticmethod
def snake_case_ ( UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = parser.add_parser("""env""" )
download_parser.set_defaults(func=UpperCamelCase__ )
download_parser.add_argument(
"""--accelerate-config_file""" , default=UpperCamelCase__ , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=UpperCamelCase__ )
def __init__( self , UpperCamelCase__ , *UpperCamelCase__ ) -> None:
'''simple docstring'''
A_ = accelerate_config_file
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = """not installed"""
if is_safetensors_available():
import safetensors
A_ = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
A_ = f'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
A_ = """not installed"""
A_ = A_ = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
A_ = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(UpperCamelCase__ ):
A_ = load_config_from_file(self._accelerate_config_file ).to_dict()
A_ = (
"""\n""".join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(UpperCamelCase__ , UpperCamelCase__ )
else f'''\t{accelerate_config}'''
)
A_ = """not installed"""
A_ = """NA"""
if is_torch_available():
import torch
A_ = torch.__version__
A_ = torch.cuda.is_available()
A_ = """not installed"""
A_ = """NA"""
if is_tf_available():
import tensorflow as tf
A_ = tf.__version__
try:
# deprecated in v2.1
A_ = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
A_ = bool(tf.config.list_physical_devices("""GPU""" ) )
A_ = """not installed"""
A_ = """not installed"""
A_ = """not installed"""
A_ = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
A_ = flax.__version__
A_ = jax.__version__
A_ = jaxlib.__version__
A_ = jax.lib.xla_bridge.get_backend().platform
A_ = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": f'''{safetensors_version}''',
"""Accelerate version""": f'''{accelerate_version}''',
"""Accelerate config""": f'''{accelerate_config_str}''',
"""PyTorch version (GPU?)""": f'''{pt_version} ({pt_cuda_available})''',
"""Tensorflow version (GPU?)""": f'''{tf_version} ({tf_cuda_available})''',
"""Flax version (CPU?/GPU?/TPU?)""": f'''{flax_version} ({jax_backend})''',
"""Jax version""": f'''{jax_version}''',
"""JaxLib version""": f'''{jaxlib_version}''',
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(UpperCamelCase__ ) )
return info
@staticmethod
def snake_case_ ( UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 667 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( _UpperCAmelCase , unittest.TestCase ):
lowercase = FunnelTokenizer
lowercase = FunnelTokenizerFast
lowercase = True
lowercase = True
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
super().setUp()
A_ = [
"""<unk>""",
"""<cls>""",
"""<sep>""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def snake_case_ ( self , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **A_ )
def snake_case_ ( self , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **A_ )
def snake_case_ ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
A_ = """UNwant\u00E9d,running"""
A_ = """unwanted, running"""
return input_text, output_text
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = self.tokenizer_class(self.vocab_file )
A_ = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(A_ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [7, 4, 5, 10, 8, 9] )
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
A_ = tokenizer("""UNwant\u00E9d,running""" )
A_ = len(inputs["""input_ids"""] ) - 1
self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len )
A_ = tokenizer("""UNwant\u00E9d,running""" , """UNwant\u00E9d,running""" )
self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len + [1] * sentence_len )
| 705 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( _snake_case , unittest.TestCase ):
lowercase = KandinskyVaaPriorPipeline
lowercase = ["prompt"]
lowercase = ["prompt", "negative_prompt"]
lowercase = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
lowercase = False
@property
def snake_case_ ( self ) -> Any:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
return self.time_input_dim
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
return 100
@property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
A_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(UpperCamelCase__ )
@property
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
A_ = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 12,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
A_ = PriorTransformer(**UpperCamelCase__ )
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it does not
A_ = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
A_ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
A_ = CLIPVisionModelWithProjection(UpperCamelCase__ )
return model
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = CLIPImageProcessor(
crop_size=224 , do_center_crop=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_resize=UpperCamelCase__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.dummy_prior
A_ = self.dummy_image_encoder
A_ = self.dummy_text_encoder
A_ = self.dummy_tokenizer
A_ = self.dummy_image_processor
A_ = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=UpperCamelCase__ , clip_sample_range=10.0 , )
A_ = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=0 ) -> Optional[int]:
'''simple docstring'''
if str(UpperCamelCase__ ).startswith("""mps""" ):
A_ = torch.manual_seed(UpperCamelCase__ )
else:
A_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
A_ = {
"""prompt""": """horse""",
"""generator""": generator,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = """cpu"""
A_ = self.get_dummy_components()
A_ = self.pipeline_class(**UpperCamelCase__ )
A_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
A_ = output.image_embeds
A_ = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0]
A_ = image[0, -10:]
A_ = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
A_ = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = torch_device == """cpu"""
A_ = True
A_ = False
self._test_inference_batch_single_identical(
test_max_difference=UpperCamelCase__ , relax_max_difference=UpperCamelCase__ , test_mean_pixel_difference=UpperCamelCase__ , )
@skip_mps
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = torch_device == """cpu"""
A_ = False
self._test_attention_slicing_forward_pass(
test_max_difference=UpperCamelCase__ , test_mean_pixel_difference=UpperCamelCase__ , )
| 667 | 0 |
'''simple docstring'''
import argparse
from collections import defaultdict
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Union[str, Any]:
A_ = F'''{file}_{class_name}_{test_name}'''
done_test[_id] += 1
with open(_lowerCamelCase, """r""" ) as f:
A_ = f.readlines()
A_ = F'''class {class_name}('''
A_ = F'''{4 * " "}def {test_name}('''
A_ = F'''{8 * " "}{correct_line.split()[0]}'''
A_ = F'''{16 * " "}{correct_line.split()[0]}'''
A_ = False
A_ = False
A_ = False
A_ = False
A_ = 0
A_ = 0
A_ = []
for line in lines:
if line.startswith(_lowerCamelCase ):
A_ = True
elif in_class and line.startswith(_lowerCamelCase ):
A_ = True
elif in_class and in_func and (line.startswith(_lowerCamelCase ) or line.startswith(_lowerCamelCase )):
A_ = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
A_ = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
A_ = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F'''{spaces * " "}{correct_line}''' )
A_ = False
else:
new_lines.append(_lowerCamelCase )
with open(_lowerCamelCase, """w""" ) as f:
for line in new_lines:
f.write(_lowerCamelCase )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__=None ) -> Tuple:
if fail is not None:
with open(_lowerCamelCase, """r""" ) as f:
A_ = {l.strip() for l in f.readlines()}
else:
A_ = None
with open(_lowerCamelCase, """r""" ) as f:
A_ = f.readlines()
A_ = defaultdict(_lowerCamelCase )
for line in correct_lines:
A_ = line.split(""";""" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
__lowerCamelCase = parser.parse_args()
main(args.correct_filename, args.fail_filename)
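# Hedged note on the input format: the script above splits each line of
# --correct_filename on ";", so a line is expected to look roughly like the
# illustration below (file path; class; test; corrected line). The exact layout
# of real inputs is an assumption here.
#
#   tests/models/bert/test_modeling_bert.py;BertModelTest;test_inference;expected_slice = torch.tensor([...])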
| 706 |
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step + 1, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step + 1, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step + 1, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step + 1, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
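# Minimal usage sketch of the scheduler under test (illustrative, mirrors full_loop above):
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)  # `model` is a hypothetical denoising network
#       sample = scheduler.step(residual, t, sample).prev_sample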
| 667 | 0 |
'''simple docstring'''
def least_divisible_repunit(divisor: int) -> int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_00_00_00) -> int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
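# Worked example: 111111 = 7 * 15873 and no shorter repunit is divisible by 7,
# so least_divisible_repunit(7) == 6.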
if __name__ == "__main__":
print(f"""{solution() = }""")
| 707 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCamelCase = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"

        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)

            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                # reconstructed mapping: each stacked expert tensor is split out
                # under an "experts/expert_{idx}/" prefix
                dest = key.replace("expert/", f"experts/expert_{idx}/")
                s_dict[dest] = expert_weights[idx]
                print(f"{key} -> {dest}")

            s_dict.pop(key)

    return s_dict
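# Example of the full pipeline above (derived from the code, illustrative):
#   "encoder/layers_0/attention/query/kernel"
#     -> "encoder/block/0/layer/attention/query/kernel"    (layers_N -> block/N/layer)
#     -> "encoder/block/0/layer/0/SelfAttention/q/kernel"  (MOE_LAYER_NAME_MAPPING)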
__lowerCamelCase = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a Google-style gin config to the Hugging Face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCamelCase = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
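# Example invocation (all paths are placeholders):
#   python convert_switch_transformers_checkpoint.py \
#       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --gin_file /path/to/config.gin \
#       --pytorch_dump_folder_path ./switch_base_8 --num_experts 8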
| 667 | 0 |
'''simple docstring'''
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)

        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))

        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])

        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)

        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real

        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
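# Worked example: f(y) = y**2 * y**4 = y**6, so f''(y) = 30 * y**4 and the call
# above prints f''(9) = 30 * 9**4 = 196830 -- exact, with no finite-difference error.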
| 708 |
'''simple docstring'''
def climb_stairs(number_of_steps: int) -> int:
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
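# Example: climb_stairs(3) == 3, counting the step sequences 1+1+1, 1+2 and 2+1
# (the classic Fibonacci recurrence).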
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 709 |
'''simple docstring'''
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 1_00_00) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            num = sum_reverse(num)
            iterations += 1
            if is_palindrome(num):
                break
        else:
            lychrel_nums.append(a)
    return len(lychrel_nums)
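# Example: 47 is not a Lychrel candidate, since 47 + 74 = 121 is already a
# palindrome after one iteration; 196 never reaches one within the 50-iteration cap.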
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
'''simple docstring'''
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # disable the bar everywhere except the local main process
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
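# Usage sketch (assumes the API above; the iterable and kwargs are illustrative):
#   for batch in tqdm(True, dataloader, desc="train"):  # bar only on local main process
#       ...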
| 710 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
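# Example: ord("中") == 0x4E2D lies in the base CJK block (0x4E00-0x9FFF),
# so _is_chinese_char(ord("中")) returns True.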
def is_chinese(word: str) -> int:
    # word like '180' or '身高' or '神'
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1
def get_chinese_word(tokens: List[str]) -> List[str]:
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set) -> List[str]:
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match = min(end - start, max_word_len)
            for i in range(max_match, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer) -> List[List[int]]:
    ltp_res = []

    for i in range(0, len(lines), 1_00):
        res = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 1_00):
        res = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=True, truncation=True, max_length=5_12)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the positions of Chinese subwords that start with ##,
        # which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)

        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
__lowerCamelCase = parser.parse_args()
main(args)
| 667 | 0 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ''
IMG_DIR = ''
OUTPUT_DIR = ''
NUMBER_IMAGES = 250
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths, annos, idxs, OUTPUT_SIZE, SCALE_RANGE, filter_scale=FILTER_TINY_SCALE, )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
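# The .txt written above uses the YOLO label format,
# "<class_id> <x_center> <y_center> <width> <height>", with all coordinates
# normalised to [0, 1] relative to the output image.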
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(
    all_img_list: list, all_annos: list, idxs: list[int], output_size: tuple[int, int], scale_range: tuple[float, float], filter_scale: float = 0.0, ) -> tuple[list, list, str]:
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 711 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums

    return []
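# Worked examples of Goldbach's other conjecture: 9 = 7 + 2*1**2, 15 = 13 + 2*1**2,
# 33 = 31 + 2*1**2; the smallest odd composite with no such decomposition is 5777.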
def solution() -> int:
    return compute_nums(1)[0]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
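# With _LazyModule, the torch/TF submodules listed above are imported only on first
# attribute access, so importing the package stays cheap when BLIP is unused.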
| 712 |
'''simple docstring'''
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
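# Example: search([1, 2, 4, 5, 3], 4) == 2 -- the two ends are checked first,
# then the window shrinks inward one position per recursive call.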
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
'''simple docstring'''
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val

        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid), self._query_range(node.right, node.mid + 1, j), )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
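# update() and query_range() run in O(log n); traverse() yields nodes in
# breadth-first order starting from the root.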
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
__lowerCamelCase = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 713 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
            assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 10_00 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 2_55

    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
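# The two tests above verify that acquiring an already-held lock raises Timeout,
# and that overlong lock filenames are shortened so the basename stays <= 255 chars.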
| 667 | 0 |
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
    load_gpt2,
    recopy_gpt2,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32, max_steps=10, size_objective_set=1_00, min_len=10_26, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl", ):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=10_26, trim=trim)
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)

    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)

    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=1_28, eval_freq=1_00, igf_model_path="igf_model.pt", ):
    set_seed(42)

    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner, secondary_learner_train_data, max_epochs=secondary_learner_max_epochs, batch_size=secondary_learner_batch_size, eval_freq=1_00, igf_model_path=igf_model_path, )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner
def finetune(
    model, train_dataset, test_dataset, context_len=32, max_steps=10_00, batch_size=16, threshold=1.0, recopy_model=recopy_gpt2, secondary_learner=None, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt", ):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context[0, :] = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0))[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.

                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
                # Break out of the loop after 60 batches
                if max_steps > 0 and global_step > 60:
                    break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
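# Note: with the schedule above, a context is skipped when the secondary learner's
# predicted information gain falls below `threshold`; after 10 optimizer steps the
# threshold drops to -1, effectively disabling the filter for the rest of training.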
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=False, help="The input data dir. Should contain data files for WikiText.", )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=False, help="Path to pretrained model or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--data_file", type=str, default=None, help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ), )
    parser.add_argument(
        "--igf_data_file", type=str, default=None, help="A jbl file containing the context and information gain pairs to train secondary learner.", )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=False, help="The output directory where the final fine-tuned model is stored.", )
    parser.add_argument(
        "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name", )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len", default=32, type=int, help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ), )
    parser.add_argument(
        "--size_objective_set", default=1_00, type=int, help="number of articles that are long enough to be used as our objective set", )
    parser.add_argument(
        "--eval_freq", default=1_00, type=int, help="secondary model evaluation is triggered at eval_freq" )
    parser.add_argument("--max_steps", default=10_00, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size", default=1_28, type=int, help="batch size of training data for secondary learner", )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) " )
    parser.add_argument(
        "--eval_interval", default=10, type=int, help=(
            "decay the selectivity of our secondary learner filter from "
            "1 standard deviation above average to 1 below average after 10 batches"
        ), )
    parser.add_argument(
        "--number", default=1_00, type=int, help="The number of examples split to be used as objective_set/test_data" )
    parser.add_argument(
        "--min_len", default=10_26, type=int, help="The minimum length of the article to be used as objective set" )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner" )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold", default=1.0, type=float, help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ), )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model", default=recopy_gpt2, type=str, required=False, help="Reset the model to the original pretrained GPT-2 weights after each iteration", )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32, max_steps=10, size_objective_set=1_00, min_len=10_26, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl", )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=1_28, eval_freq=1_00, igf_model_path="igf_model.pt", )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=1_00, min_len=10_26, trim=True )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model, train_dataset, test_dataset, context_len=32, max_steps=10_00, batch_size=16, threshold=1.0, recopy_model=recopy_gpt2, secondary_learner=secondary_learner, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt", )
if __name__ == "__main__":
main()
| 714 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    sample: torch.FloatTensor
class Encoder(nn.Module):
    # NOTE: attribute names and block kwargs below are reconstructed from the
    # diffusers VAE Encoder; treat exact keyword values as best-effort.
    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", double_z=True, ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(
            in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1, )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type, num_layers=self.layers_per_block, in_channels=input_channel, out_channels=output_channel, add_downsample=not is_final_block, resnet_eps=1e-6, downsample_padding=0, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=None, )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default", attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=None, )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False)
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False)
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class Decoder(nn.Module):
    # NOTE: attribute names and block kwargs below are reconstructed from the
    # diffusers VAE Decoder; treat exact keyword values as best-effort.
    def __init__(self, in_channels=3, out_channels=3, up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", norm_type="group", ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1, )

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default" if norm_type == "group" else norm_type, attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=temb_channels, )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type, num_layers=self.layers_per_block + 1, in_channels=prev_output_channel, out_channels=output_channel, prev_output_channel=None, add_upsample=not is_final_block, resnet_eps=1e-6, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=temb_channels, resnet_time_scale_shift=norm_type, )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False)
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False)
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds)
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__="random" , UpperCamelCase__=False , UpperCamelCase__=True ) -> str:
'''simple docstring'''
super().__init__()
A_ = n_e
A_ = vq_embed_dim
A_ = beta
A_ = legacy
A_ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
A_ = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
A_ = self.used.shape[0]
A_ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A_ = self.re_embed
A_ = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
A_ = n_e
A_ = sane_index_shape
def snake_case_ ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = inds.shape
assert len(UpperCamelCase__ ) > 1
A_ = inds.reshape(ishape[0] , -1 )
A_ = self.used.to(UpperCamelCase__ )
A_ = (inds[:, :, None] == used[None, None, ...]).long()
A_ = match.argmax(-1 )
A_ = match.sum(2 ) < 1
if self.unknown_index == "random":
A_ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
A_ = self.unknown_index
return new.reshape(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
A_ = inds.shape
assert len(UpperCamelCase__ ) > 1
A_ = inds.reshape(ishape[0] , -1 )
A_ = self.used.to(UpperCamelCase__ )
if self.re_embed > self.used.shape[0]: # extra token
A_ = 0 # simply set to zero
A_ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCamelCase__ )
return back.reshape(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
# reshape z -> (batch, height, width, channel) and flatten
A_ = z.permute(0 , 2 , 3 , 1 ).contiguous()
A_ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A_ = torch.argmin(torch.cdist(UpperCamelCase__ , self.embedding.weight ) , dim=1 )
A_ = self.embedding(UpperCamelCase__ ).view(z.shape )
A_ = None
A_ = None
# compute loss for embedding
if not self.legacy:
A_ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A_ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
A_ = z + (z_q - z).detach()
# reshape back to match original input shape
A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
A_ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
A_ = self.remap_to_used(UpperCamelCase__ )
A_ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
A_ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A_ = indices.reshape(shape[0] , -1 ) # add batch axis
A_ = self.unmap_to_all(UpperCamelCase__ )
A_ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A_ = self.embedding(UpperCamelCase__ )
if shape is not None:
A_ = z_q.view(UpperCamelCase__ )
# reshape back to match original input shape
A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
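# Illustrative, self-contained sketch of the quantization step implemented
# above (nearest-codebook lookup, commitment loss, straight-through
# estimator); written standalone so it runs independently of the class.
def _vq_step_demo():
    import torch

    codebook = torch.randn(16, 4)                 # 16 codes of dimension 4
    z = torch.randn(32, 4, requires_grad=True)    # flattened encoder vectors
    idx = torch.cdist(z, codebook).argmin(dim=1)  # nearest code per vector
    z_q = codebook[idx]
    # the beta-weighted term pulls the encoder output toward its chosen codes
    loss = 0.25 * ((z_q.detach() - z) ** 2).mean() + ((z_q - z.detach()) ** 2).mean()
    z_q = z + (z_q - z).detach()  # straight-through: gradient flows back to z
    return z_q, loss, idx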
class A__ ( _snake_case ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__=False ) -> Dict:
'''simple docstring'''
A_ = parameters
A_ , A_ = torch.chunk(UpperCamelCase__ , 2 , dim=1 )
A_ = torch.clamp(self.logvar , -30.0 , 20.0 )
A_ = deterministic
A_ = torch.exp(0.5 * self.logvar )
A_ = torch.exp(self.logvar )
if self.deterministic:
A_ = A_ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def snake_case_ ( self , UpperCamelCase__ = None ) -> torch.FloatTensor:
'''simple docstring'''
# make sure sample is on the same device as the parameters and has same dtype
A_ = randn_tensor(
self.mean.shape , generator=UpperCamelCase__ , device=self.parameters.device , dtype=self.parameters.dtype )
A_ = self.mean + self.std * sample
return x
def snake_case_ ( self , UpperCamelCase__=None ) -> int:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=[1, 2, 3] ) -> Optional[Any]:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
A_ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCamelCase__ )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
return self.mean
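# Illustrative sketch of the distribution above: `parameters` packs the mean
# and log-variance along the channel dimension, sampling uses the
# reparameterization trick, and kl() against None is the closed-form KL to a
# standard normal. Shapes here are hypothetical.
def _diag_gaussian_demo():
    import torch

    params = torch.randn(1, 8, 4, 4)             # 4 mean + 4 logvar channels
    mean, logvar = torch.chunk(params, 2, dim=1)
    logvar = torch.clamp(logvar, -30.0, 20.0)
    std = torch.exp(0.5 * logvar)
    sample = mean + std * torch.randn_like(std)  # reparameterization trick
    kl = 0.5 * torch.sum(mean.pow(2) + logvar.exp() - 1.0 - logvar, dim=[1, 2, 3])
    return sample, kl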
| 667 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from typing import Any
class A__ :
def __init__( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = []
A_ = 0
A_ = 0
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
return self.head == self.tail
def snake_case_ ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
self.data.append(__UpperCamelCase )
A_ = self.tail + 1
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = self.data[self.head]
A_ = self.head + 1
return ret
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
return self.tail - self.head
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
print(self.data )
print("""**************""" )
print(self.data[self.head : self.tail] )
class A__ :
def __init__( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = data
A_ = None
A_ = None
A_ = 1
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
return self.data
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
return self.left
def snake_case_ ( self ) -> Any:
'''simple docstring'''
return self.right
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
return self.height
def snake_case_ ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = data
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = node
def snake_case_ ( self , UpperCamelCase__ ) -> Any:
'''simple docstring'''
A_ = node
def snake_case_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = height
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
if node is None:
return 0
return node.get_height()
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> int:
if a > b:
return a
return b
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> MyNode:
print("""left rotation node:""", node.get_data() )
A_ = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(UpperCAmelCase__ )
A_ = my_max(get_height(node.get_right() ), get_height(node.get_left() ) ) + 1
node.set_height(UpperCAmelCase__ )
A_ = my_max(get_height(ret.get_right() ), get_height(ret.get_left() ) ) + 1
ret.set_height(UpperCAmelCase__ )
return ret
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> MyNode:
print("""right rotation node:""", node.get_data() )
A_ = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(UpperCAmelCase__ )
A_ = my_max(get_height(node.get_right() ), get_height(node.get_left() ) ) + 1
node.set_height(UpperCAmelCase__ )
A_ = my_max(get_height(ret.get_right() ), get_height(ret.get_left() ) ) + 1
ret.set_height(UpperCAmelCase__ )
return ret
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> MyNode:
A_ = node.get_left()
assert left_child is not None
node.set_left(left_rotation(UpperCAmelCase__ ) )
return right_rotation(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> MyNode:
A_ = node.get_right()
assert right_child is not None
node.set_right(right_rotation(UpperCAmelCase__ ) )
return left_rotation(UpperCAmelCase__ )
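# Note on the two compound rotations above: the LR case (the new node lands in
# the right subtree of the left child) is reduced to a plain single-rotation
# case by first rotating the left child and then rotating the node itself; the
# RL case is the mirror image. Each single rotation updates the heights of the
# two nodes it moves via set_height.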
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> MyNode | None:
if node is None:
return MyNode(UpperCAmelCase__ )
if data < node.get_data():
node.set_left(insert_node(node.get_left(), UpperCAmelCase__ ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
A_ = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
A_ = right_rotation(UpperCAmelCase__ )
else:
A_ = lr_rotation(UpperCAmelCase__ )
else:
node.set_right(insert_node(node.get_right(), UpperCAmelCase__ ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
A_ = node.get_right()
assert right_child is not None
if data < right_child.get_data():
A_ = rl_rotation(UpperCAmelCase__ )
else:
A_ = left_rotation(UpperCAmelCase__ )
A_ = my_max(get_height(node.get_right() ), get_height(node.get_left() ) ) + 1
node.set_height(UpperCAmelCase__ )
return node
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Any:
while True:
A_ = root.get_right()
if right_child is None:
break
A_ = right_child
return root.get_data()
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Any:
while True:
A_ = root.get_left()
if left_child is None:
break
A_ = left_child
return root.get_data()
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> MyNode | None:
A_ = root.get_left()
A_ = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
A_ = get_left_most(UpperCAmelCase__ )
root.set_data(UpperCAmelCase__ )
root.set_right(del_node(UpperCAmelCase__, UpperCAmelCase__ ) )
elif left_child is not None:
A_ = left_child
elif right_child is not None:
A_ = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("""No such data""" )
return root
else:
root.set_left(del_node(UpperCAmelCase__, UpperCAmelCase__ ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(UpperCAmelCase__, UpperCAmelCase__ ) )
if get_height(UpperCAmelCase__ ) - get_height(UpperCAmelCase__ ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
A_ = left_rotation(UpperCAmelCase__ )
else:
A_ = rl_rotation(UpperCAmelCase__ )
elif get_height(UpperCAmelCase__ ) - get_height(UpperCAmelCase__ ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
A_ = right_rotation(UpperCAmelCase__ )
else:
A_ = lr_rotation(UpperCAmelCase__ )
A_ = my_max(get_height(root.get_right() ), get_height(root.get_left() ) ) + 1
root.set_height(UpperCAmelCase__ )
return root
class A__ :
def __init__( self ) -> Dict:
'''simple docstring'''
A_ = None
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
return get_height(self.root )
def snake_case_ ( self , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
print("""insert:""" + str(__UpperCamelCase ) )
A_ = insert_node(self.root , __UpperCamelCase )
def snake_case_ ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
print("""delete:""" + str(__UpperCamelCase ) )
if self.root is None:
print("""Tree is empty!""" )
return
A_ = del_node(self.root , __UpperCamelCase )
def __str__( self , ) -> str: # a level-order traversal gives a more intuitive look at the tree
'''simple docstring'''
A_ = """"""
A_ = MyQueue()
q.push(self.root )
A_ = self.get_height()
if layer == 0:
return output
A_ = 0
while not q.is_empty():
A_ = q.pop()
A_ = """ """ * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(__UpperCamelCase )
q.push(__UpperCamelCase )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
A_ = cnt + 1
for i in range(100 ):
if cnt == math.pow(2 , __UpperCamelCase ) - 1:
A_ = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def UpperCAmelCase__ ( ) -> None:
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
__lowerCamelCase = AVLtree()
__lowerCamelCase = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
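# Deterministic walk-through of the balancing above (illustrative): inserting
# 3, then 2, then 1 makes the root left-heavy (height difference 2) on the
# third insert; since 1 < 2 this is the left-left case, so a single rotation
# promotes 2, giving the balanced tree 2 -> (1, 3) with height 2.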
| 715 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
# Load configuration defined in the metadata file
with open(UpperCAmelCase__ ) as metadata_file:
A_ = json.load(UpperCAmelCase__ )
A_ = LukeConfig(use_entity_aware_attention=UpperCAmelCase__, **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
A_ = torch.load(UpperCAmelCase__, map_location="""cpu""" )["""module"""]
# Load the entity vocab file
A_ = load_original_entity_vocab(UpperCAmelCase__ )
# add an entry for [MASK2]
A_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
A_ = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
A_ = AddedToken("""<ent>""", lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ )
A_ = AddedToken("""<ent2>""", lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__, """tokenizer_config.json""" ), """r""" ) as f:
A_ = json.load(UpperCAmelCase__ )
A_ = """MLukeTokenizer"""
with open(os.path.join(UpperCAmelCase__, """tokenizer_config.json""" ), """w""" ) as f:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__, MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ), """w""" ) as f:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__ )
# Initialize the embeddings of the special tokens
A_ = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
A_ = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
A_ = state_dict["""embeddings.word_embeddings.weight"""]
A_ = word_emb[ent_init_index].unsqueeze(0 )
A_ = word_emb[enta_init_index].unsqueeze(0 )
A_ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
A_ = state_dict[bias_name]
A_ = decoder_bias[ent_init_index].unsqueeze(0 )
A_ = decoder_bias[enta_init_index].unsqueeze(0 )
A_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
A_ = F'''encoder.layer.{layer_index}.attention.self.'''
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
A_ = state_dict["""entity_embeddings.entity_embeddings.weight"""]
A_ = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
A_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
A_ = state_dict["""entity_predictions.bias"""]
A_ = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
A_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
A_ = LukeForMaskedLM(config=UpperCAmelCase__ ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
A_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
A_ = state_dict[key]
else:
A_ = state_dict[key]
A_ , A_ = model.load_state_dict(UpperCAmelCase__, strict=UpperCAmelCase__ )
if set(UpperCAmelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(UpperCAmelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__, task="""entity_classification""" )
A_ = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
A_ = (0, 9)
A_ = tokenizer(UpperCAmelCase__, entity_spans=[span], return_tensors="""pt""" )
A_ = model(**UpperCAmelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
A_ = torch.Size((1, 33, 7_68) )
A_ = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCAmelCase__, atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
A_ = torch.Size((1, 1, 7_68) )
A_ = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], UpperCAmelCase__, atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__ )
A_ = """Tokyo is the capital of <mask>."""
A_ = (24, 30)
A_ = tokenizer(UpperCAmelCase__, entity_spans=[span], return_tensors="""pt""" )
A_ = model(**UpperCAmelCase__ )
A_ = encoding["""input_ids"""][0].tolist()
A_ = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
A_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(UpperCAmelCase__ )
A_ = outputs.entity_logits[0][0].argmax().item()
A_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(UpperCAmelCase__ ) )
model.save_pretrained(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = ["""[MASK]""", """[PAD]""", """[UNK]"""]
A_ = [json.loads(UpperCAmelCase__ ) for line in open(UpperCAmelCase__ )]
A_ = {}
for entry in data:
A_ = entry["""id"""]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
A_ = entity_id
break
A_ = F'''{language}:{entity_name}'''
A_ = entity_id
return new_mapping
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
__lowerCamelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
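# Example invocation (hypothetical script name and paths, for illustration):
#
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path ./mluke/pytorch_model.bin \
#       --metadata_path ./mluke/metadata.json \
#       --entity_vocab_path ./mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-converted \
#       --model_size base
#
# Note that the entity vocab is parsed as JSON lines (one object per line)
# even though the --entity_vocab_path help text mentions a .tsv file.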
| 667 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__lowerCamelCase = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class A__ ( unittest.TestCase ):
lowercase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowercase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowercase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowercase = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" )
A_ = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(UpperCamelCase__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
A_ = text_classifier("""This is great !""" , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}] )
A_ = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
] , )
A_ = text_classifier("""This is great !""" , top_k=1 )
self.assertEqual(nested_simplify(UpperCamelCase__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
# Legacy behavior
A_ = text_classifier("""This is great !""" , return_all_scores=UpperCamelCase__ )
self.assertEqual(nested_simplify(UpperCamelCase__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
A_ = text_classifier("""This is great !""" , return_all_scores=UpperCamelCase__ )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]] )
A_ = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=UpperCamelCase__ )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
] , )
A_ = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=UpperCamelCase__ )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
{"""label""": """LABEL_0""", """score""": 0.504},
{"""label""": """LABEL_0""", """score""": 0.504},
] , )
@require_torch
def snake_case_ ( self ) -> str:
'''simple docstring'''
import torch
A_ = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , )
A_ = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(UpperCamelCase__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
@require_tf
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" )
A_ = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(UpperCamelCase__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
@slow
@require_torch
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = pipeline("""text-classification""" )
A_ = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(UpperCamelCase__ ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
A_ = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(UpperCamelCase__ ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
A_ = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(UpperCamelCase__ ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )
@slow
@require_tf
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = pipeline("""text-classification""" , framework="""tf""" )
A_ = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(UpperCamelCase__ ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
A_ = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(UpperCamelCase__ ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
A_ = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(UpperCamelCase__ ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = TextClassificationPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
return text_classifier, ["HuggingFace is in", "This is another test"]
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Any:
'''simple docstring'''
A_ = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
A_ = """HuggingFace is in"""
A_ = text_classifier(UpperCamelCase__ )
self.assertEqual(nested_simplify(UpperCamelCase__ ) , [{"""label""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ )}] )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
A_ = ["""HuggingFace is in """, """Paris is in France"""]
A_ = text_classifier(UpperCamelCase__ )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [{"""label""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ )}, {"""label""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ )}] , )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
A_ = text_classifier(UpperCamelCase__ , top_k=UpperCamelCase__ )
A_ = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [[{"""label""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ )}] * N, [{"""label""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ )}] * N] , )
A_ = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
A_ = text_classifier(UpperCamelCase__ )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , {"""label""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ )} , )
self.assertTrue(outputs["""label"""] in model.config.idalabel.values() )
# This might be used as a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
A_ = [["""HuggingFace is in """, """Paris is in France"""]]
with self.assertRaises(UpperCamelCase__ ):
text_classifier(UpperCamelCase__ )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
A_ = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [{"""label""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ )}] , )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
| 716 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A__ ( _snake_case ):
lowercase = "ClapFeatureExtractor"
lowercase = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = kwargs.pop("""sampling_rate""" , UpperCamelCase__ )
if text is None and audios is None:
raise ValueError("""You have to specify either text or audios. Both cannot be none.""" )
if text is not None:
A_ = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if audios is not None:
A_ = self.feature_extractor(
UpperCamelCase__ , sampling_rate=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if text is not None and audios is not None:
A_ = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = self.tokenizer.model_input_names
A_ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
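# Illustrative usage sketch for the processor above (the checkpoint name and
# audio content are assumptions made for the example):
def _clap_processor_demo():
    import numpy as np
    from transformers import ClapProcessor

    processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
    audio = np.zeros(48_000, dtype=np.float32)  # one second of silence
    inputs = processor(
        text=["a dog barking"], audios=[audio], sampling_rate=48_000,
        return_tensors="pt",
    )
    # token ids plus the audio input_features merged into one BatchEncoding
    return sorted(inputs.keys())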
| 667 | 0 |
'''simple docstring'''
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class A__ ( __UpperCAmelCase , unittest.TestCase ):
lowercase = RoFormerTokenizer
lowercase = RoFormerTokenizerFast
lowercase = True
lowercase = True
def snake_case_ ( self ) -> str:
'''simple docstring'''
super().setUp()
def snake_case_ ( self , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **UpperCamelCase__ )
def snake_case_ ( self , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **UpperCamelCase__ )
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = """永和服装饰品有限公司,今天天气非常好"""
A_ = """永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"""
return input_text, output_text
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = self.get_tokenizer()
A_ , A_ = self.get_chinese_input_output_texts()
A_ = tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , output_text.split() )
A_ = tokens + [tokenizer.unk_token]
A_ = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = self.get_rust_tokenizer()
A_ , A_ = self.get_chinese_input_output_texts()
A_ = tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , output_text.split() )
A_ = tokens + [tokenizer.unk_token]
A_ = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
pass
def snake_case_ ( self ) -> Any:
'''simple docstring'''
pass
| 717 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__lowerCamelCase = imread(r'''digital_image_processing/image_data/lena_small.jpg''')
__lowerCamelCase = cvtColor(img, COLOR_BGR2GRAY)
def UpperCAmelCase__ ( ) -> Dict:
A_ = cn.convert_to_negative(UpperCAmelCase__ )
# assert negative_img array for at least one True
assert negative_img.any()
def UpperCAmelCase__ ( ) -> List[Any]:
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
# Work around direct image comparison by asserting on the repr of the response
assert str(cc.change_contrast(UpperCAmelCase__, 1_10 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def UpperCAmelCase__ ( ) -> str:
A_ = canny.gen_gaussian_kernel(9, sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def UpperCAmelCase__ ( ) -> Union[str, Any]:
A_ = imread("""digital_image_processing/image_data/lena_small.jpg""", 0 )
# assert ambiguous array for all == True
assert canny_img.all()
A_ = canny.canny(UpperCAmelCase__ )
# assert canny array for at least one True
assert canny_array.any()
def UpperCAmelCase__ ( ) -> Dict:
assert gg.gaussian_filter(UpperCAmelCase__, 5, sigma=0.9 ).all()
def UpperCAmelCase__ ( ) -> int:
# Laplacian kernel including the diagonal neighbors
A_ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
A_ = conv.img_convolve(UpperCAmelCase__, UpperCAmelCase__ ).astype(UpperCAmelCase__ )
assert res.any()
def UpperCAmelCase__ ( ) -> List[Any]:
assert med.median_filter(UpperCAmelCase__, 3 ).any()
def UpperCAmelCase__ ( ) -> List[Any]:
A_ , A_ = sob.sobel_filter(UpperCAmelCase__ )
assert grad.any() and theta.any()
def UpperCAmelCase__ ( ) -> List[str]:
A_ = sp.make_sepia(UpperCAmelCase__, 20 )
assert sepia.all()
def UpperCAmelCase__ ( UpperCAmelCase__ = "digital_image_processing/image_data/lena_small.jpg" ) -> List[Any]:
A_ = bs.Burkes(imread(UpperCAmelCase__, 1 ), 1_20 )
burkes.process()
assert burkes.output_img.any()
def UpperCAmelCase__ ( UpperCAmelCase__ = "digital_image_processing/image_data/lena_small.jpg", ) -> Optional[int]:
A_ = rs.NearestNeighbour(imread(UpperCAmelCase__, 1 ), 4_00, 2_00 )
nn.process()
assert nn.output.any()
def UpperCAmelCase__ ( ) -> Optional[int]:
A_ = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
A_ = imread(UpperCAmelCase__, 0 )
# Test for get_neighbors_pixel function() return not None
A_ = 0
A_ = 0
A_ = image[x_coordinate][y_coordinate]
A_ = lbp.get_neighbors_pixel(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
A_ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0, image.shape[0] ):
for j in range(0, image.shape[1] ):
A_ = lbp.local_binary_value(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
assert lbp_image.any()
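# Illustrative, self-contained sketch of the local binary pattern the test
# above exercises: each of the eight neighbours contributes a power of two
# when it is >= the centre pixel (one common thresholding convention).
def _lbp_value_demo():
    import numpy as np

    patch = np.array([[6, 5, 2], [7, 6, 1], [9, 8, 7]])
    center = patch[1, 1]
    # clockwise neighbour order starting at the top-left corner
    neighbors = [patch[0, 0], patch[0, 1], patch[0, 2], patch[1, 2],
                 patch[2, 2], patch[2, 1], patch[2, 0], patch[1, 0]]
    return sum(int(n >= center) << i for i, n in enumerate(neighbors))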
| 667 | 0 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class A__ :
@staticmethod
def snake_case_ ( *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_torch
class A__ ( unittest.TestCase ):
lowercase = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
A_ = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
A_ = [
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
]
return object_detector, examples
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = object_detector(examples[0] , threshold=0.0 )
A_ = len(lowerCamelCase__ )
self.assertGreater(lowerCamelCase__ , 0 )
self.assertEqual(
lowerCamelCase__ , [
{
"""score""": ANY(lowerCamelCase__ ),
"""label""": ANY(lowerCamelCase__ ),
"""box""": {"""xmin""": ANY(lowerCamelCase__ ), """ymin""": ANY(lowerCamelCase__ ), """xmax""": ANY(lowerCamelCase__ ), """ymax""": ANY(lowerCamelCase__ )},
}
for i in range(lowerCamelCase__ )
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
def snake_case_ ( self ) -> str:
'''simple docstring'''
pass
@require_torch
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
A_ = object_detector(
"""./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.64 , )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=4 ) , [
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
] , )
A_ = object_detector(
[
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=4 ) , [
[
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
]
] , )
@require_torch
@slow
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = pipeline("""zero-shot-object-detection""" )
A_ = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
] , )
A_ = object_detector(
[
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
] , )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=4 ) , [
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@require_torch
@slow
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = 0.2
A_ = pipeline("""zero-shot-object-detection""" )
A_ = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=lowerCamelCase__ , )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
] , )
@require_torch
@slow
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = 2
A_ = pipeline("""zero-shot-object-detection""" )
A_ = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=lowerCamelCase__ , )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
] , )
| 718 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> float:
_validate_point(UpperCAmelCase__ )
_validate_point(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(a - b ) for a, b in zip(UpperCAmelCase__, UpperCAmelCase__ ) ) )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> None:
if point:
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
for item in point:
if not isinstance(UpperCAmelCase__, (int, float) ):
A_ = (
"""Expected a list of numbers as input, found """
F'''{type(UpperCAmelCase__ ).__name__}'''
)
raise TypeError(UpperCAmelCase__ )
else:
A_ = F'''Expected a list of numbers as input, found {type(UpperCAmelCase__ ).__name__}'''
raise TypeError(UpperCAmelCase__ )
else:
raise ValueError("""Missing an input""" )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> float:
_validate_point(UpperCAmelCase__ )
_validate_point(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(x - y ) for x, y in zip(UpperCAmelCase__, UpperCAmelCase__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
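# Worked example for the distance functions above: for [1, 1] and [2, 2],
# |1 - 2| + |1 - 2| = 2.0. Points of different dimensionality raise
# ValueError, and non-numeric entries raise TypeError via the validator.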
| 667 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class A__ ( UpperCamelCase_ ):
lowercase = "efficientnet"
def __init__( self , UpperCamelCase__ = 3 , UpperCamelCase__ = 600 , UpperCamelCase__ = 2.0 , UpperCamelCase__ = 3.1 , UpperCamelCase__ = 8 , UpperCamelCase__ = [3, 3, 5, 3, 5, 5, 3] , UpperCamelCase__ = [32, 16, 24, 40, 80, 112, 192] , UpperCamelCase__ = [16, 24, 40, 80, 112, 192, 320] , UpperCamelCase__ = [] , UpperCamelCase__ = [1, 2, 2, 2, 1, 2, 1] , UpperCamelCase__ = [1, 2, 2, 3, 3, 4, 1] , UpperCamelCase__ = [1, 6, 6, 6, 6, 6, 6] , UpperCamelCase__ = 0.25 , UpperCamelCase__ = "swish" , UpperCamelCase__ = 2560 , UpperCamelCase__ = "mean" , UpperCamelCase__ = 0.02 , UpperCamelCase__ = 0.001 , UpperCamelCase__ = 0.99 , UpperCamelCase__ = 0.5 , UpperCamelCase__ = 0.2 , **UpperCamelCase__ , ) -> List[str]:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
A_ = num_channels
A_ = image_size
A_ = width_coefficient
A_ = depth_coefficient
A_ = depth_divisor
A_ = kernel_sizes
A_ = in_channels
A_ = out_channels
A_ = depthwise_padding
A_ = strides
A_ = num_block_repeats
A_ = expand_ratios
A_ = squeeze_expansion_ratio
A_ = hidden_act
A_ = hidden_dim
A_ = pooling_type
A_ = initializer_range
A_ = batch_norm_eps
A_ = batch_norm_momentum
A_ = dropout_rate
A_ = drop_connect_rate
A_ = sum(UpperCamelCase__ ) * 4
class A__ ( UpperCamelCase_ ):
lowercase = version.parse("1.11" )
@property
def snake_case_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def snake_case_ ( self ) -> float:
'''simple docstring'''
return 1e-5
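# Illustrative sketch (an assumption, not part of this config class) of how
# EfficientNet-style width scaling typically rounds channel counts to a
# multiple of depth_divisor, which is why the divisor is a config field:
def _round_filters_demo(dim: int, width_coefficient: float = 2.0, divisor: int = 8) -> int:
    dim *= width_coefficient
    new_dim = max(divisor, int(dim + divisor / 2) // divisor * divisor)
    if new_dim < 0.9 * dim:  # avoid rounding away more than 10% of channels
        new_dim += divisor
    return int(new_dim)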
| 719 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class A__ ( _snake_case ):
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> None:
'''simple docstring'''
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , UpperCamelCase__ , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
| 667 | 0 |
'''simple docstring'''
import re
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
A_ = re.compile(
r"""^(?:0|94|\+94|0{2}94)""" r"""7(0|1|2|4|5|6|7|8)""" r"""(-| |)""" r"""\d{7}$""" )
return bool(re.search(UpperCamelCase__, UpperCamelCase__ ) )
if __name__ == "__main__":
__lowerCamelCase = """0094702343221"""
print(is_sri_lankan_phone_number(phone))
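# Trace of the pattern above on the sample "0094702343221": "0094" matches
# the 0{2}94 prefix alternative, "70" satisfies the operator-code group, the
# empty separator alternative in (-| |) matches, and "2343221" supplies the
# required seven digits, so the match succeeds. A number such as
# "0731234567" fails because 3 is not an accepted digit after the leading 7.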
| 720 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
if num < 0:
return False
A_ = num
A_ = 0
while num > 0:
A_ = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
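# Worked trace of the reversal loop above for num = 121: rev_num accumulates
# 1 -> 12 -> 121 while the working copy shrinks 12 -> 1 -> 0, so 121 == 121
# yields True; 123 reverses to 321 != 123 and yields False, and any negative
# input returns False immediately.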
| 667 | 0 |
'''simple docstring'''
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class A__ ( unittest.TestCase ):
lowercase = MODEL_FOR_MASKED_LM_MAPPING
lowercase = TF_MODEL_FOR_MASKED_LM_MAPPING
def snake_case_ ( self ) -> int:
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
A_ = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1e-0_5, """token""": 38015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1e-0_5, """token""": 25506, """token_str""": """ accuser"""},
] , )
A_ = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1e-0_5,
"""token""": 38015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1e-0_5,
"""token""": 25506,
"""token_str""": """ accuser""",
},
] , )
A_ = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2e-0_5, """token""": 13606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2e-0_5, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9e-0_5, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
A_ = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2e-0_5, """token""": 35676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2e-0_5, """token""": 16416, """token_str""": """ELS"""},
] , )
A_ = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2e-0_5,
"""token""": 35676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2e-0_5, """token""": 16416, """token_str""": """ELS"""},
] , )
A_ = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1e-0_5, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2e-0_5, """token""": 2941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2e-0_5, """token""": 13606, """token_str""": """ Clara"""},
] , )
A_ = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=6 ) , [
[
{
"""score""": 2.2e-0_5,
"""token""": 35676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2e-0_5, """token""": 16416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2e-0_5,
"""token""": 35676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2e-0_5, """token""": 16416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
A_ = pipe("""Paris is the [MASK] of France.""" )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got cast back to float32
# for postprocessing.
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
@slow
@require_torch
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(UpperCamelCase__ )
@slow
@require_tf
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
A_ = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1573, """token_str""": """ Chris"""},
] , )
A_ = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 12790,
"""token_str""": """ Lyon""",
},
] , )
A_ = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 13606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
A_ = None
A_ = None
self.run_pipeline_test(UpperCamelCase__ , [] )
@require_tf
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
A_ = None
A_ = None
self.run_pipeline_test(UpperCamelCase__ , [] )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
A_ = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
A_ = [
f'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
A_ = fill_masker.tokenizer
A_ = fill_masker.model
A_ = fill_masker(
f'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
UpperCamelCase__ , [
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
] , )
A_ = fill_masker([f'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
UpperCamelCase__ , [
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
] , )
A_ = fill_masker([f'''This is a {tokenizer.mask_token}''', f'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
UpperCamelCase__ , [
[
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
],
[
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
],
] , )
with self.assertRaises(UpperCamelCase__ ):
fill_masker([None] )
# An input with no mask_token is not supported
with self.assertRaises(UpperCamelCase__ ):
fill_masker("""This is""" )
self.run_test_top_k(UpperCamelCase__ , UpperCamelCase__ )
self.run_test_targets(UpperCamelCase__ , UpperCamelCase__ )
self.run_test_top_k_targets(UpperCamelCase__ , UpperCamelCase__ )
self.fill_mask_with_duplicate_targets_and_top_k(UpperCamelCase__ , UpperCamelCase__ )
self.fill_mask_with_multiple_masks(UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = tokenizer.get_vocab()
A_ = sorted(vocab.keys() )[:2]
# Pipeline argument
A_ = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ , targets=UpperCamelCase__ )
A_ = fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
UpperCamelCase__ , [
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
] , )
A_ = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , UpperCamelCase__ )
A_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(UpperCamelCase__ ) )
# Call argument
A_ = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
A_ = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
] , )
A_ = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , UpperCamelCase__ )
A_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(UpperCamelCase__ ) )
# Score equivalence
A_ = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=UpperCamelCase__ )
A_ = [top_mask["""token_str"""] for top_mask in outputs]
A_ = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(UpperCamelCase__ ) == set(UpperCamelCase__ ):
A_ = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=UpperCamelCase__ )
A_ = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(UpperCamelCase__ ) , nested_simplify(UpperCamelCase__ ) )
# Raises with invalid targets
with self.assertRaises(UpperCamelCase__ ):
A_ = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(UpperCamelCase__ ):
A_ = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[""""""] )
with self.assertRaises(UpperCamelCase__ ):
A_ = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets="""""" )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
A_ = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ , top_k=2 )
A_ = fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
UpperCamelCase__ , [
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
] , )
A_ = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
A_ = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
UpperCamelCase__ , [
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
] , )
self.assertEqual(nested_simplify(UpperCamelCase__ ) , nested_simplify(UpperCamelCase__ ) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = tokenizer.get_vocab()
A_ = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
# top_k=2, ntargets=3
A_ = sorted(vocab.keys() )[:3]
A_ = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=UpperCamelCase__ )
# If we use the most probable targets, and filter differently, we should still
# have the same results
A_ = [el["""token_str"""] for el in sorted(UpperCamelCase__ , key=lambda x : x["score"] , reverse=UpperCamelCase__ )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(UpperCamelCase__ ).issubset(UpperCamelCase__ ):
A_ = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=UpperCamelCase__ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(UpperCamelCase__ ) , nested_simplify(UpperCamelCase__ ) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
A_ = tokenizer.get_vocab()
# String duplicates + id duplicates
A_ = sorted(vocab.keys() )[:3]
A_ = [targets[0], targets[1], targets[0], targets[2], targets[1]]
A_ = fill_masker(f'''My name is {tokenizer.mask_token}''' , targets=UpperCamelCase__ , top_k=10 )
# The target list contains duplicates, so we can't output more
# unique predictions than it contains
self.assertEqual(len(UpperCamelCase__ ) , 3 )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
A_ = fill_masker(
f'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
UpperCamelCase__ , [
[
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
],
[
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
],
[
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
],
] , )
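# A minimal usage sketch of the pipeline behaviour the tests above exercise,
# assuming the `transformers` library and the `distilroberta-base` checkpoint
# used in the slow tests; exact scores vary by model version.
#
# from transformers import pipeline
#
# unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2)
# for prediction in unmasker("My name is <mask>"):
#     # each prediction carries "sequence", "score", "token" and "token_str"
#     print(prediction["token_str"], prediction["score"])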
| 721 |
'''simple docstring'''
__lowerCamelCase = range(2, 20 + 1)
__lowerCamelCase = [10**k for k in range(ks[-1] + 1)]
__lowerCamelCase = {}
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Tuple:
A_ = sum(a_i[j] for j in range(UpperCAmelCase__, len(UpperCAmelCase__ ) ) )
A_ = sum(a_i[j] * base[j] for j in range(min(len(UpperCAmelCase__ ), UpperCAmelCase__ ) ) )
A_ , A_ = 0, 0
A_ = n - i
A_ = memo.get(UpperCAmelCase__ )
if sub_memo is not None:
A_ = sub_memo.get(UpperCAmelCase__ )
if jumps is not None and len(UpperCAmelCase__ ) > 0:
# find and make the largest jump without going over
A_ = -1
for _k in range(len(UpperCAmelCase__ ) - 1, -1, -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
A_ = _k
break
if max_jump >= 0:
A_ , A_ , A_ = jumps[max_jump]
# since the difference between jumps is cached, add c
A_ = diff + c
for j in range(min(UpperCAmelCase__, len(UpperCAmelCase__ ) ) ):
A_ , A_ = divmod(UpperCAmelCase__, 10 )
if new_c > 0:
add(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ = []
else:
A_ = {c: []}
A_ = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
A_ , A_ = next_term(UpperCAmelCase__, k - 1, i + dn, UpperCAmelCase__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
A_ , A_ = compute(UpperCAmelCase__, UpperCAmelCase__, i + dn, UpperCAmelCase__ )
diff += _diff
dn += terms_jumped
A_ = sub_memo[c]
# keep jumps sorted by # of terms skipped
A_ = 0
while j < len(UpperCAmelCase__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value of digitsum(b) and c
sub_memo[c].insert(UpperCAmelCase__, (diff, dn, k) )
return (diff, dn)
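# Shape of the cache used above, following the upstream naming: `memo` maps
# digitsum(b) -> {c: [(diff, terms_jumped, k), ...]}, with each jump list kept
# sorted by terms_jumped so the largest admissible jump is found by scanning
# from the end.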
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> int:
if i >= n:
return 0, i
if k > len(UpperCAmelCase__ ):
a_i.extend([0 for _ in range(k - len(UpperCAmelCase__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
A_ = i
A_ , A_ , A_ = 0, 0, 0
for j in range(len(UpperCAmelCase__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
A_ = ds_c + ds_b
diff += addend
A_ = 0
for j in range(UpperCAmelCase__ ):
A_ = a_i[j] + addend
A_ , A_ = divmod(UpperCAmelCase__, 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
return diff, i - start_i
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> str:
for j in range(UpperCAmelCase__, len(UpperCAmelCase__ ) ):
A_ = digits[j] + addend
if s >= 10:
A_ , A_ = divmod(UpperCAmelCase__, 10 )
A_ = addend // 10 + quotient
else:
A_ = s
A_ = addend // 10
if addend == 0:
break
while addend > 0:
A_ , A_ = divmod(UpperCAmelCase__, 10 )
digits.append(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ = 10**15 ) -> int:
A_ = [1]
A_ = 1
A_ = 0
while True:
A_ , A_ = next_term(UpperCAmelCase__, 20, i + dn, UpperCAmelCase__ )
dn += terms_jumped
if dn == n - i:
break
A_ = 0
for j in range(len(UpperCAmelCase__ ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
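# A naive cross-check for small inputs, assuming the recurrence accelerated
# above is a(1) = 1, a(n) = a(n-1) + digitsum(a(n-1)) (Project Euler 551);
# the indexing convention may differ from `solution` by one.
#
# def brute_force(n: int) -> int:
#     a = 1
#     for _ in range(n - 1):
#         a += sum(int(d) for d in str(a))
#     return a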
| 667 | 0 |
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
__lowerCamelCase = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
__lowerCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCAmelCase__ ( ) -> Dict:
A_ = """https://pypi.org/pypi/diffusers/json"""
A_ = json.loads(request.urlopen(UpperCAmelCase__ ).read() )["""releases"""].keys()
return sorted(UpperCAmelCase__, key=lambda UpperCAmelCase__ : version.Version(UpperCAmelCase__ ) )
def UpperCAmelCase__ ( ) -> Tuple:
# This function has already been executed if HF_MODULES_CACHE already is in the Python path.
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(UpperCAmelCase__ )
os.makedirs(UpperCAmelCase__, exist_ok=UpperCAmelCase__ )
A_ = Path(UpperCAmelCase__ ) / """__init__.py"""
if not init_path.exists():
init_path.touch()
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Union[str, Any]:
init_hf_modules()
A_ = Path(UpperCAmelCase__ ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(UpperCAmelCase__, exist_ok=UpperCAmelCase__ )
A_ = dynamic_module_path / """__init__.py"""
if not init_path.exists():
init_path.touch()
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Dict:
with open(UpperCAmelCase__, """r""", encoding="""utf-8""" ) as f:
A_ = f.read()
# Imports of the form `import .xxx`
A_ = re.findall(r"""^\s*import\s+\.(\S+)\s*$""", UpperCAmelCase__, flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall(r"""^\s*from\s+\.(\S+)\s+import""", UpperCAmelCase__, flags=re.MULTILINE )
# Unique-ify
return list(set(UpperCAmelCase__ ) )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Any:
A_ = False
A_ = [module_file]
A_ = []
# Let's recurse through all relative imports
while not no_change:
A_ = []
for f in files_to_check:
new_imports.extend(get_relative_imports(UpperCAmelCase__ ) )
A_ = Path(UpperCAmelCase__ ).parent
A_ = [str(module_path / m ) for m in new_imports]
A_ = [f for f in new_import_files if f not in all_relative_imports]
A_ = [F'''{f}.py''' for f in new_import_files]
A_ = len(UpperCAmelCase__ ) == 0
all_relative_imports.extend(UpperCAmelCase__ )
return all_relative_imports
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Any:
with open(UpperCAmelCase__, """r""", encoding="""utf-8""" ) as f:
A_ = f.read()
# Imports of the form `import xxx`
A_ = re.findall(r"""^\s*import\s+(\S+)\s*$""", UpperCAmelCase__, flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall(r"""^\s*from\s+(\S+)\s+import""", UpperCAmelCase__, flags=re.MULTILINE )
# Only keep the top-level module
A_ = [imp.split(""".""" )[0] for imp in imports if not imp.startswith(""".""" )]
# Unique-ify and test we got them all
A_ = list(set(UpperCAmelCase__ ) )
A_ = []
for imp in imports:
try:
importlib.import_module(UpperCAmelCase__ )
except ImportError:
missing_packages.append(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > 0:
raise ImportError(
"""This modeling file requires the following packages that were not found in your environment: """
F'''{", ".join(UpperCAmelCase__ )}. Run `pip install {" ".join(UpperCAmelCase__ )}`''' )
return get_relative_imports(UpperCAmelCase__ )
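# Worked example of the check above: a module file containing
#     import torch
#     from .helpers import f
# requires `torch` to be importable and returns ["helpers"] as its relative
# imports; a missing package raises ImportError with a `pip install` hint.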
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> List[str]:
A_ = module_path.replace(os.path.sep, """.""" )
A_ = importlib.import_module(UpperCAmelCase__ )
if class_name is None:
return find_pipeline_class(UpperCAmelCase__ )
return getattr(UpperCAmelCase__, UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> List[Any]:
from ..pipelines import DiffusionPipeline
A_ = dict(inspect.getmembers(UpperCAmelCase__, inspect.isclass ) )
A_ = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls, UpperCAmelCase__ )
and cls.__module__.split(""".""" )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F'''Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'''
F''' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'''
F''' {loaded_module}.''' )
A_ = cls
return pipeline_class
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ = None, UpperCAmelCase__ = False, UpperCAmelCase__ = False, UpperCAmelCase__ = None, UpperCAmelCase__ = None, UpperCAmelCase__ = None, UpperCAmelCase__ = False, ) -> List[str]:
A_ = str(UpperCAmelCase__ )
A_ = os.path.join(UpperCAmelCase__, UpperCAmelCase__ )
if os.path.isfile(UpperCAmelCase__ ):
A_ = module_file_or_url
A_ = """local"""
elif pretrained_model_name_or_path.count("""/""" ) == 0:
A_ = get_diffusers_versions()
# cut ".dev0"
A_ = """v""" + """.""".join(__version__.split(""".""" )[:3] )
# retrieve github version that matches
if revision is None:
A_ = latest_version if latest_version[1:] in available_versions else """main"""
logger.info(F'''Defaulting to latest_version: {revision}.''' )
elif revision in available_versions:
A_ = F'''v{revision}'''
elif revision == "main":
A_ = revision
else:
raise ValueError(
F'''`custom_revision`: {revision} does not exist. Please make sure to choose one of'''
F''' {", ".join(available_versions + ["main"] )}.''' )
# community pipeline on GitHub
A_ = COMMUNITY_PIPELINES_URL.format(revision=UpperCAmelCase__, pipeline=UpperCAmelCase__ )
try:
A_ = cached_download(
UpperCAmelCase__, cache_dir=UpperCAmelCase__, force_download=UpperCAmelCase__, proxies=UpperCAmelCase__, resume_download=UpperCAmelCase__, local_files_only=UpperCAmelCase__, use_auth_token=UpperCAmelCase__, )
A_ = """git"""
A_ = pretrained_model_name_or_path + """.py"""
except EnvironmentError:
logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
raise
else:
try:
# Load from URL or cache if already cached
A_ = hf_hub_download(
UpperCAmelCase__, UpperCAmelCase__, cache_dir=UpperCAmelCase__, force_download=UpperCAmelCase__, proxies=UpperCAmelCase__, resume_download=UpperCAmelCase__, local_files_only=UpperCAmelCase__, use_auth_token=UpperCAmelCase__, )
A_ = os.path.join("""local""", """--""".join(pretrained_model_name_or_path.split("""/""" ) ) )
except EnvironmentError:
logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
raise
# Check we have all the requirements in our environment
A_ = check_imports(UpperCAmelCase__ )
# Now we move the module inside our cached dynamic modules.
A_ = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(UpperCAmelCase__ )
A_ = Path(UpperCAmelCase__ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(UpperCAmelCase__, submodule_path / module_file )
for module_needed in modules_needed:
A_ = F'''{module_needed}.py'''
shutil.copy(os.path.join(UpperCAmelCase__, UpperCAmelCase__ ), submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = use_auth_token
elif use_auth_token is True:
A_ = HfFolder.get_token()
else:
A_ = None
A_ = model_info(UpperCAmelCase__, revision=UpperCAmelCase__, token=UpperCAmelCase__ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
A_ = submodule_path / commit_hash
A_ = full_submodule + os.path.sep + commit_hash
create_dynamic_module(UpperCAmelCase__ )
if not (submodule_path / module_file).exists():
shutil.copy(UpperCAmelCase__, submodule_path / module_file )
# Make sure we also have every file with relative imports
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
UpperCAmelCase__, F'''{module_needed}.py''', cache_dir=UpperCAmelCase__, force_download=UpperCAmelCase__, resume_download=UpperCAmelCase__, proxies=UpperCAmelCase__, use_auth_token=UpperCAmelCase__, revision=UpperCAmelCase__, local_files_only=UpperCAmelCase__, )
return os.path.join(UpperCAmelCase__, UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ = None, UpperCAmelCase__ = None, UpperCAmelCase__ = False, UpperCAmelCase__ = False, UpperCAmelCase__ = None, UpperCAmelCase__ = None, UpperCAmelCase__ = None, UpperCAmelCase__ = False, **UpperCAmelCase__, ) -> List[str]:
A_ = get_cached_module_file(
UpperCAmelCase__, UpperCAmelCase__, cache_dir=UpperCAmelCase__, force_download=UpperCAmelCase__, resume_download=UpperCAmelCase__, proxies=UpperCAmelCase__, use_auth_token=UpperCAmelCase__, revision=UpperCAmelCase__, local_files_only=UpperCAmelCase__, )
return get_class_in_module(UpperCAmelCase__, final_module.replace(""".py""", """""" ) )
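# A hedged usage sketch for the loader defined last above (named
# `get_class_from_dynamic_module` in upstream diffusers); the repo id and file
# name below are illustrative and resolving them needs network access.
#
# pipeline_cls = get_class_from_dynamic_module(
#     "hf-internal-testing/diffusers-dummy-pipeline",
#     module_file="pipeline.py",
# )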
| 700 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class A__ ( tf.keras.layers.Layer ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=1 , UpperCamelCase__=False , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
A_ = vocab_size
A_ = d_embed
A_ = d_proj
A_ = cutoffs + [vocab_size]
A_ = [0] + self.cutoffs
A_ = div_val
A_ = self.cutoffs[0]
A_ = len(self.cutoffs ) - 1
A_ = self.shortlist_size + self.n_clusters
A_ = keep_order
A_ = []
A_ = []
def snake_case_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if self.n_clusters > 0:
A_ = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=UpperCamelCase__ , name="""cluster_weight""" )
A_ = self.add_weight(
shape=(self.n_clusters,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name="""cluster_bias""" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
A_ = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_projs_._{i}''' , )
self.out_projs.append(UpperCamelCase__ )
else:
self.out_projs.append(UpperCamelCase__ )
A_ = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._weight''' , )
A_ = self.add_weight(
shape=(self.vocab_size,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
A_ , A_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
A_ = self.d_embed // (self.div_val**i)
A_ = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_projs_._{i}''' )
self.out_projs.append(UpperCamelCase__ )
A_ = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._weight''' , )
A_ = self.add_weight(
shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(UpperCamelCase__ )
@staticmethod
def snake_case_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ) -> List[Any]:
'''simple docstring'''
A_ = x
if proj is not None:
A_ = tf.einsum("""ibd,ed->ibe""" , UpperCamelCase__ , UpperCamelCase__ )
return tf.einsum("""ibd,nd->ibn""" , UpperCamelCase__ , UpperCamelCase__ ) + b
@staticmethod
def snake_case_ ( UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
A_ = shape_list(UpperCamelCase__ )
A_ = tf.range(lp_size[0] , dtype=target.dtype )
A_ = tf.stack([r, target] , 1 )
return tf.gather_nd(UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=True , UpperCamelCase__=False ) -> Optional[int]:
'''simple docstring'''
A_ = 0
if self.n_clusters == 0:
A_ = self._logit(UpperCamelCase__ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
A_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=UpperCamelCase__ , logits=UpperCamelCase__ )
A_ = tf.nn.log_softmax(UpperCamelCase__ , axis=-1 )
else:
A_ = shape_list(UpperCamelCase__ )
A_ = []
A_ = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
A_ , A_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
A_ = (target >= l_idx) & (target < r_idx)
A_ = tf.where(UpperCamelCase__ )
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ ) - l_idx
if self.div_val == 1:
A_ = self.out_layers[0][0][l_idx:r_idx]
A_ = self.out_layers[0][1][l_idx:r_idx]
else:
A_ = self.out_layers[i][0]
A_ = self.out_layers[i][1]
if i == 0:
A_ = tf.concat([cur_W, self.cluster_weight] , 0 )
A_ = tf.concat([cur_b, self.cluster_bias] , 0 )
A_ = self._logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.out_projs[0] )
A_ = tf.nn.log_softmax(UpperCamelCase__ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = self._gather_logprob(UpperCamelCase__ , UpperCamelCase__ )
else:
A_ = self._logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.out_projs[i] )
A_ = tf.nn.log_softmax(UpperCamelCase__ )
A_ = self.cutoffs[0] + i - 1 # No probability for the head cluster
A_ = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(UpperCamelCase__ )
if target is not None:
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = self._gather_logprob(UpperCamelCase__ , UpperCamelCase__ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(UpperCamelCase__ , -cur_logprob , shape_list(UpperCamelCase__ ) )
A_ = tf.concat(UpperCamelCase__ , axis=-1 )
if target is not None:
if return_mean:
A_ = tf.reduce_mean(UpperCamelCase__ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(UpperCamelCase__ )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference).
self.add_metric(UpperCamelCase__ , name=self.name , aggregation="""mean""" if return_mean else """""" )
return out
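# A hedged construction sketch for the layer above (it mirrors
# `TFAdaptiveSoftmaxMask` from the TF Transformer-XL code); the sizes and the
# (seq_len, batch, d_proj) hidden-state shape are illustrative.
#
# layer = TFAdaptiveSoftmaxMask(vocab_size=10000, d_embed=128, d_proj=128,
#                               cutoffs=[1000, 5000], div_val=2)
# log_probs = layer(tf.zeros((8, 4, 128)), None, return_mean=True)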
| 667 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
__lowerCamelCase = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
__lowerCamelCase = TaTokenizerFast
__lowerCamelCase = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model''']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
__lowerCamelCase = _LazyModule(
__name__,
globals()['''__file__'''],
_import_structure,
extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast},
module_spec=__spec__,
)
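# A hedged note on the lazy pattern above: at import time only
# `_import_structure` is materialised, and e.g.
#
# from transformers.models.mt5 import MT5Model   # illustrative import path
#
# resolves MT5Model through `_LazyModule.__getattr__`, so torch/TF/flax are
# only imported when the corresponding attribute is first accessed.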
| 701 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, ) -> float | int:
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
A_ = cst_fwd.get(UpperCAmelCase__, np.inf )
A_ = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
A_ = new_cost_f
A_ = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
A_ = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> int:
A_ = -1
A_ = set()
A_ = set()
A_ = {source: 0}
A_ = {destination: 0}
A_ = {source: None}
A_ = {destination: None}
A_ = PriorityQueue()
A_ = PriorityQueue()
A_ = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
A_ , A_ = queue_forward.get()
visited_forward.add(UpperCAmelCase__ )
A_ , A_ = queue_backward.get()
visited_backward.add(UpperCAmelCase__ )
A_ = pass_and_relaxation(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
A_ = pass_and_relaxation(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
A_ = shortest_distance
return shortest_path_distance
__lowerCamelCase = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
__lowerCamelCase = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
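# A hedged usage sketch, following the upstream names for the function and the
# two adjacency maps defined above; the shortest E -> F cost here is 3
# (E -> G -> F), beating E -> B -> C -> D -> F at cost 4.
#
# print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # -> 3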
| 667 | 0 |