| code (string, 87–55.2k chars) | code_codestyle (int64, 0–349) | style_context (string, 135–49.1k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
'''simple docstring'''
class SubArray:
    """simple docstring"""

    def __init__(self, arr):
        # Parse the comma-separated input string into a list of numbers.
        self.array = arr.split(",")

    def solve_sub_array(self):
        # Kadane-style DP: sum_value[i] is the best sum of a slice ending at i,
        # rear[i] is the best sum seen anywhere up to index i.
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the result is:", re))
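# Worked example: for the input "1,-3,4,-2,-1,6" the best contiguous
# slice is [4, -2, -1, 6], so solve_sub_array() returns 7.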
| 323 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Any=("DownEncoderBlock2D",) , lowerCamelCase_ : List[Any]=(64,) , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : List[Any]="silu" , lowerCamelCase_ : Optional[int]=True , ):
'''simple docstring'''
super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = torch.nn.Conv2d(
            lowerCamelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
        self.mid_block = None
        self.down_blocks = nn.ModuleList([] )
# down
SCREAMING_SNAKE_CASE : Tuple = block_out_channels[0]
for i, down_block_type in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Any = output_channel
SCREAMING_SNAKE_CASE : List[str] = block_out_channels[i]
SCREAMING_SNAKE_CASE : Union[str, Any] = i == len(lowerCamelCase_ ) - 1
SCREAMING_SNAKE_CASE : Optional[Any] = get_down_block(
lowerCamelCase_ , num_layers=self.layers_per_block , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
self.down_blocks.append(lowerCamelCase_ )
# mid
        self.mid_block = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
# out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCamelCase_ , eps=1e-6 )
        self.conv_act = nn.SiLU()
        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1] , conv_out_channels , 3 , padding=1 )
        self.gradient_checkpointing = False
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = x
SCREAMING_SNAKE_CASE : int = self.conv_in(lowerCamelCase_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase_ : List[Any] ):
def custom_forward(*lowerCamelCase_ : List[str] ):
return module(*lowerCamelCase_ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
else:
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCamelCase_ )
else:
# down
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : Tuple = down_block(lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : List[Any] = self.mid_block(lowerCamelCase_ )
# post-process
SCREAMING_SNAKE_CASE : Optional[Any] = self.conv_norm_out(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = self.conv_act(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self.conv_out(lowerCamelCase_ )
return sample
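# The forward pass above follows the standard activation-checkpointing
# recipe: while training, each block is wrapped in a closure and routed
# through torch.utils.checkpoint.checkpoint, which drops intermediate
# activations on the way forward and recomputes them during backward,
# trading compute for memory. A minimal standalone sketch (hypothetical
# layer and shapes):
#
#     def create_custom_forward(module):
#         def custom_forward(*inputs):
#             return module(*inputs)
#         return custom_forward
#
#     layer = nn.Linear(16, 16)
#     x = torch.randn(2, 16, requires_grad=True)
#     y = torch.utils.checkpoint.checkpoint(create_custom_forward(layer), x, use_reentrant=False)
#     y.sum().backward()  # gradients flow despite the discarded activations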
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : str=("UpDecoderBlock2D",) , lowerCamelCase_ : Union[str, Any]=(64,) , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : Dict="silu" , lowerCamelCase_ : Any="group" , ):
'''simple docstring'''
super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = nn.Conv2d(
            lowerCamelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
        self.mid_block = None
        self.up_blocks = nn.ModuleList([] )
SCREAMING_SNAKE_CASE : str = in_channels if norm_type == """spatial""" else None
# mid
        self.mid_block = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
# up
SCREAMING_SNAKE_CASE : Union[str, Any] = list(reversed(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Any = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = output_channel
SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i]
SCREAMING_SNAKE_CASE : List[str] = i == len(lowerCamelCase_ ) - 1
SCREAMING_SNAKE_CASE : List[Any] = get_up_block(
lowerCamelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , prev_output_channel=lowerCamelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , resnet_time_scale_shift=lowerCamelCase_ , )
self.up_blocks.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = output_channel
# out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0] , lowerCamelCase_ )
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCamelCase_ , eps=1e-6 )
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0] , lowerCamelCase_ , 3 , padding=1 )
        self.gradient_checkpointing = False
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : str=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = z
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_in(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase_ : List[str] ):
def custom_forward(*lowerCamelCase_ : str ):
return module(*lowerCamelCase_ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
SCREAMING_SNAKE_CASE : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
else:
# middle
SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ )
else:
# middle
SCREAMING_SNAKE_CASE : Any = self.mid_block(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Any = up_block(lowerCamelCase_ , lowerCamelCase_ )
# post-process
if latent_embeds is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_norm_out(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_act(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.conv_out(lowerCamelCase_ )
return sample
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        '''simple docstring'''
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy
        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices.")
        else:
            self.re_embed = n_e
        self.sane_index_shape = sane_index_shape
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = inds.shape
assert len(lowerCamelCase_ ) > 1
SCREAMING_SNAKE_CASE : Tuple = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long()
SCREAMING_SNAKE_CASE : Union[str, Any] = match.argmax(-1 )
SCREAMING_SNAKE_CASE : Tuple = match.sum(2 ) < 1
if self.unknown_index == "random":
SCREAMING_SNAKE_CASE : Tuple = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
SCREAMING_SNAKE_CASE : Any = self.unknown_index
return new.reshape(lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = inds.shape
assert len(lowerCamelCase_ ) > 1
SCREAMING_SNAKE_CASE : str = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ )
if self.re_embed > self.used.shape[0]: # extra token
SCREAMING_SNAKE_CASE : List[Any] = 0 # simply set to zero
SCREAMING_SNAKE_CASE : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCamelCase_ )
return back.reshape(lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous()
SCREAMING_SNAKE_CASE : int = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
SCREAMING_SNAKE_CASE : Any = torch.argmin(torch.cdist(lowerCamelCase_ , self.embedding.weight ) , dim=1 )
SCREAMING_SNAKE_CASE : Tuple = self.embedding(lowerCamelCase_ ).view(z.shape )
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : List[str] = None
# compute loss for embedding
if not self.legacy:
SCREAMING_SNAKE_CASE : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
SCREAMING_SNAKE_CASE : Tuple = z + (z_q - z).detach()
# reshape back to match original input shape
SCREAMING_SNAKE_CASE : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE : List[Any] = self.remap_to_used(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ):
'''simple docstring'''
if self.remap is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE : List[Any] = self.unmap_to_all(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
SCREAMING_SNAKE_CASE : str = self.embedding(lowerCamelCase_ )
if shape is not None:
SCREAMING_SNAKE_CASE : List[str] = z_q.view(lowerCamelCase_ )
# reshape back to match original input shape
SCREAMING_SNAKE_CASE : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
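# Standalone sketch of the quantization step implemented above (hypothetical
# sizes): snap each latent vector to its nearest codebook entry with an
# argmin over pairwise distances, then apply the straight-through estimator
# so gradients bypass the non-differentiable lookup.
#
#     codebook = torch.randn(16, 4)                    # (n_e, vq_embed_dim)
#     z = torch.randn(8, 4, requires_grad=True)
#     indices = torch.argmin(torch.cdist(z, codebook), dim=1)
#     z_q = codebook[indices]
#     commit_loss = torch.mean((z_q.detach() - z) ** 2)
#     z_q = z + (z_q - z).detach()                     # straight-through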
class DiagonalGaussianDistribution(object):
    """simple docstring"""
    def __init__(self, parameters, deterministic=False):
        '''simple docstring'''
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype)
    def sample(self, generator: Optional[torch.Generator] = None):
        '''simple docstring'''
        # make sure sample is on the same device as the parameters and has the same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype)
        x = self.mean + self.std * sample
        return x
    def kl(self, other=None):
        '''simple docstring'''
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar, dim=[1, 2, 3], )
    def nll(self, sample, dims=[1, 2, 3]):
        '''simple docstring'''
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)
    def mode(self):
        '''simple docstring'''
        return self.mean
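# Usage sketch for the distribution above (hypothetical sizes): an encoder
# emits mean and log-variance stacked along the channel axis; sample() uses
# the reparameterization trick and kl() the closed-form divergence from a
# standard normal.
#
#     parameters = torch.randn(2, 8, 4, 4)   # (batch, 2 * latent_channels, h, w)
#     dist = DiagonalGaussianDistribution(parameters)
#     z = dist.sample()                      # mean + std * eps
#     loss_kl = dist.kl()                    # one value per batch element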
| 323 | 1 |
'''simple docstring'''
def find_minimum_change(denominations, value):
    """simple docstring"""
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Take the current denomination as long as it fits
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append to the "answer" array
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"
    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()
    if int(value) <= 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
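# Worked example: find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
# greedily returns [500, 100, 100, 100, 100, 50, 20, 10, 5, 2].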
| 323 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaxAutoencoderKL
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )
        return {"sample": image, "prng_key": prng_key}
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_input
return init_dict, inputs_dict
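# The common mixin consumes the two fixtures above roughly as follows
# (assumed flow, simplified): the config dict instantiates the model and
# the dummy input drives a Flax init/apply round trip.
#
#     model = FlaxAutoencoderKL(**init_dict)
#     params = model.init(inputs_dict["prng_key"], inputs_dict["sample"])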
| 323 | 1 |
'''simple docstring'''
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
__UpperCAmelCase = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}
__UpperCAmelCase = """zero2"""
__UpperCAmelCase = """zero3"""
__UpperCAmelCase = [ZEROa, ZEROa]
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = parameterized.to_safe_name("""_""".join(str(lowerCamelCase_ ) for x in param.args ) )
return f'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
__UpperCAmelCase = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
@parameterized.expand(lowerCamelCase_ , name_func=lowerCamelCase_ )
def lowerCamelCase_ ( self : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
self.run_and_check(
stage=lowerCamelCase_ , model=lowerCamelCase_ , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , )
@require_torch_multi_gpu
@parameterized.expand(lowerCamelCase_ , name_func=lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : str ):
'''simple docstring'''
self.run_and_check(
stage=lowerCamelCase_ , model=lowerCamelCase_ , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , )
@parameterized.expand(lowerCamelCase_ , name_func=lowerCamelCase_ )
def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any] ):
'''simple docstring'''
self.run_and_check(
stage=lowerCamelCase_ , model=lowerCamelCase_ , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , )
@require_torch_multi_gpu
@parameterized.expand(lowerCamelCase_ , name_func=lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : List[str] ):
'''simple docstring'''
self.run_and_check(
stage=lowerCamelCase_ , model=lowerCamelCase_ , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , )
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Any ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : str , lowerCamelCase_ : int = 10 , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = True , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = models[model]
SCREAMING_SNAKE_CASE : Optional[int] = self.run_trainer(
stage=lowerCamelCase_ , model_name=lowerCamelCase_ , eval_steps=lowerCamelCase_ , num_train_epochs=1 , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , )
self.do_checks(lowerCamelCase_ )
return output_dir
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : str , lowerCamelCase_ : int = 10 , lowerCamelCase_ : int = 1 , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = True , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.get_auto_remove_tmp_dir("""./xxx""" , after=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = f'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(lowerCamelCase_ )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
'''.split()
if fpaa:
args.extend(["""--fp16"""] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
SCREAMING_SNAKE_CASE : Optional[int] = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
SCREAMING_SNAKE_CASE : Optional[Any] = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
SCREAMING_SNAKE_CASE : Optional[int] = self.get_launcher(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowerCamelCase_ , env=self.get_env() )
return output_dir
def lowerCamelCase_ ( self : int , lowerCamelCase_ : Any=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = min(2 , get_gpu_count() ) if distributed else 1
return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
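# For stage "zero2" on two GPUs, run_trainer assembles a command along these
# lines (angle-bracket paths are placeholders resolved from the test env):
#
#     deepspeed --num_nodes 1 --num_gpus 2 \
#         <examples_dir>/research_projects/wav2vec2/run_asr.py \
#         --model_name_or_path patrickvonplaten/wav2vec2_tiny_random ... \
#         --deepspeed <test_dir>/ds_config_wav2vec2_zero2.json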
| 323 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase__ :
"""simple docstring"""
@staticmethod
def lowerCamelCase_ ( *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : List[str] ):
'''simple docstring'''
pass
def hashimage(image):
    """simple docstring"""
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = DepthEstimationPipeline(model=lowerCamelCase_ , image_processor=lowerCamelCase_ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} , lowerCamelCase_ )
import datasets
SCREAMING_SNAKE_CASE : List[str] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
SCREAMING_SNAKE_CASE : Any = depth_estimator(
[
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
] )
self.assertEqual(
[
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
] , lowerCamelCase_ , )
@require_tf
@unittest.skip("""Depth estimation is not implemented in TF""" )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
@slow
@require_torch
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = """Intel/dpt-large"""
SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline("""depth-estimation""" , model=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
SCREAMING_SNAKE_CASE : str = hashimage(outputs["""depth"""] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.662 )
@require_torch
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
        self.skipTest("""There is no hf-internal-testing tiny model for either GLPN or DPT""" )
| 323 | 1 |
'''simple docstring'''
import math
def sieve(n):
    """Segmented sieve: return all primes up to n."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime


print(sieve(10**6))
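# Quick sanity check against the primes below 30.
assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]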
| 323 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=13 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Dict=2_24 , lowerCamelCase_ : List[Any]=30 , lowerCamelCase_ : Union[str, Any]=4_00 , lowerCamelCase_ : str=True , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 18}
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : Any = num_channels
SCREAMING_SNAKE_CASE : str = image_size
SCREAMING_SNAKE_CASE : Dict = min_resolution
SCREAMING_SNAKE_CASE : List[str] = max_resolution
SCREAMING_SNAKE_CASE : str = do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size
SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize
SCREAMING_SNAKE_CASE : List[Any] = image_mean
SCREAMING_SNAKE_CASE : str = image_std
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ViTImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )
@property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : List[str] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Tuple = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : Any = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
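# All three tests above exercise the same contract: whatever the input type
# (PIL, numpy, or torch), calling the processor with return_tensors="pt"
# yields a float tensor of shape (batch, num_channels, height, width), e.g.
#
#     processor = ViTImageProcessor(do_resize=True, size={"height": 18, "width": 18})
#     processor(images, return_tensors="pt").pixel_values.shape
#     # -> (len(images), 3, 18, 18)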
| 323 | 1 |
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 0.0
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = jnp.float32
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : List[str] = []
for i in range(self.num_layers ):
SCREAMING_SNAKE_CASE : int = self.in_channels if i == 0 else self.out_channels
            SCREAMING_SNAKE_CASE : List[str] = FlaxResnetBlock2D(
in_channels=lowerCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCamelCase_ )
            SCREAMING_SNAKE_CASE : List[Any] = FlaxTransformer2DModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = resnets
SCREAMING_SNAKE_CASE : str = attentions
if self.add_downsample:
            SCREAMING_SNAKE_CASE : str = FlaxDownsample2D(self.out_channels , dtype=self.dtype )
def __call__( self : Dict , lowerCamelCase_ : Any , lowerCamelCase_ : str , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any]=True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ()
for resnet, attn in zip(self.resnets , self.attentions ):
SCREAMING_SNAKE_CASE : Union[str, Any] = resnet(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = attn(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
SCREAMING_SNAKE_CASE : List[str] = self.downsamplers_a(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 0.0
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = True
    SCREAMING_SNAKE_CASE__ = jnp.float32
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = []
for i in range(self.num_layers ):
SCREAMING_SNAKE_CASE : Dict = self.in_channels if i == 0 else self.out_channels
            SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxResnetBlock2D(
in_channels=lowerCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = resnets
if self.add_downsample:
            SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxDownsample2D(self.out_channels , dtype=self.dtype )
def __call__( self : Dict , lowerCamelCase_ : Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict=True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = ()
for resnet in self.resnets:
SCREAMING_SNAKE_CASE : Dict = resnet(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
SCREAMING_SNAKE_CASE : int = self.downsamplers_a(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 0.0
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = jnp.float32
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for i in range(self.num_layers ):
SCREAMING_SNAKE_CASE : Optional[int] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
SCREAMING_SNAKE_CASE : Optional[Any] = self.prev_output_channel if i == 0 else self.out_channels
            SCREAMING_SNAKE_CASE : List[Any] = FlaxResnetBlock2D(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCamelCase_ )
            SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxTransformer2DModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = resnets
SCREAMING_SNAKE_CASE : Tuple = attentions
if self.add_upsample:
            SCREAMING_SNAKE_CASE : Optional[int] = FlaxUpsample2D(self.out_channels , dtype=self.dtype )
def __call__( self : Optional[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any=True ):
'''simple docstring'''
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
SCREAMING_SNAKE_CASE : List[str] = res_hidden_states_tuple[-1]
SCREAMING_SNAKE_CASE : List[Any] = res_hidden_states_tuple[:-1]
SCREAMING_SNAKE_CASE : Dict = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
SCREAMING_SNAKE_CASE : Optional[Any] = resnet(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = attn(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ )
if self.add_upsample:
SCREAMING_SNAKE_CASE : Dict = self.upsamplers_a(lowerCamelCase_ )
return hidden_states
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 0.0
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = True
    SCREAMING_SNAKE_CASE__ = jnp.float32
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for i in range(self.num_layers ):
SCREAMING_SNAKE_CASE : List[str] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
SCREAMING_SNAKE_CASE : str = self.prev_output_channel if i == 0 else self.out_channels
            SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxResnetBlock2D(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = resnets
if self.add_upsample:
            SCREAMING_SNAKE_CASE : List[str] = FlaxUpsample2D(self.out_channels , dtype=self.dtype )
def __call__( self : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any=True ):
'''simple docstring'''
for resnet in self.resnets:
# pop res hidden states
SCREAMING_SNAKE_CASE : str = res_hidden_states_tuple[-1]
SCREAMING_SNAKE_CASE : Tuple = res_hidden_states_tuple[:-1]
SCREAMING_SNAKE_CASE : int = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
SCREAMING_SNAKE_CASE : List[str] = resnet(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ )
if self.add_upsample:
SCREAMING_SNAKE_CASE : Optional[Any] = self.upsamplers_a(lowerCamelCase_ )
return hidden_states
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 0.0
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = jnp.float32
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [
            FlaxResnetBlock2D(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
SCREAMING_SNAKE_CASE : List[str] = []
for _ in range(self.num_layers ):
            SCREAMING_SNAKE_CASE : Tuple = FlaxTransformer2DModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCamelCase_ )
            SCREAMING_SNAKE_CASE : List[str] = FlaxResnetBlock2D(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = resnets
SCREAMING_SNAKE_CASE : Union[str, Any] = attentions
def __call__( self : Union[str, Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[Any]=True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.resnets[0](lowerCamelCase_ , lowerCamelCase_ )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
SCREAMING_SNAKE_CASE : Optional[Any] = attn(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = resnet(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ )
return hidden_states
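# The up blocks above consume the encoder's residual stack in LIFO order:
# each step pops the matching skip activation and concatenates it on the
# channel axis (channels-last in Flax) before the resnet. Standalone sketch:
#
#     hidden_states = jnp.ones((1, 8, 8, 64))
#     res_stack = (jnp.ones((1, 8, 8, 32)),)
#     skip, res_stack = res_stack[-1], res_stack[:-1]
#     hidden_states = jnp.concatenate((hidden_states, skip), axis=-1)  # (1, 8, 8, 96)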
| 323 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
    OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 323 | 1 |
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def bamb(x):
    """Convert bytes to megabytes."""
    return int(x / 2**20)
class TorchTracemalloc:
    """simple docstring"""
    def __enter__(self):
        '''simple docstring'''
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self
    def __exit__(self, *exc):
        '''simple docstring'''
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"})
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE : List[str] = datasets.map(
lowerCamelCase_ , batched=lowerCamelCase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowerCamelCase_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE : Dict = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowerCamelCase_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowerCamelCase_ , padding="""max_length""" , max_length=1_28 , return_tensors="""pt""" )
return tokenizer.pad(lowerCamelCase_ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE : Tuple = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ )
return train_dataloader, eval_dataloader
def training_function(config, args):
    """simple docstring"""
    accelerator = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE : List[Any] = config["""lr"""]
SCREAMING_SNAKE_CASE : List[str] = int(config["""num_epochs"""] )
SCREAMING_SNAKE_CASE : Optional[int] = int(config["""seed"""] )
SCREAMING_SNAKE_CASE : Tuple = int(config["""batch_size"""] )
SCREAMING_SNAKE_CASE : Any = args.model_name_or_path
set_seed(lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = get_dataloaders(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE : List[str] = AutoModelForSequenceClassification.from_pretrained(lowerCamelCase_ , return_dict=lowerCamelCase_ )
# Instantiate optimizer
SCREAMING_SNAKE_CASE : Any = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
SCREAMING_SNAKE_CASE : Dict = optimizer_cls(params=model.parameters() , lr=lowerCamelCase_ )
if accelerator.state.deepspeed_plugin is not None:
SCREAMING_SNAKE_CASE : Dict = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
SCREAMING_SNAKE_CASE : int = 1
SCREAMING_SNAKE_CASE : int = (len(lowerCamelCase_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
SCREAMING_SNAKE_CASE : Tuple = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase_ , num_warmup_steps=0 , num_training_steps=lowerCamelCase_ , )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = DummyScheduler(lowerCamelCase_ , total_num_steps=lowerCamelCase_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = accelerator.prepare(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# We need to keep track of how many total steps we have iterated over
SCREAMING_SNAKE_CASE : List[str] = 0
# We also need to keep track of the stating epoch so files are named properly
SCREAMING_SNAKE_CASE : Any = 0
# Now we train the model
SCREAMING_SNAKE_CASE : List[str] = {}
for epoch in range(lowerCamelCase_ , lowerCamelCase_ ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : int = model(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = outputs.loss
SCREAMING_SNAKE_CASE : List[str] = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("""Memory before entering the train : {}""".format(bamb(tracemalloc.begin ) ) )
accelerator.print("""Memory consumed at the end of the train (end-begin): {}""".format(tracemalloc.used ) )
accelerator.print("""Peak Memory consumed during the train (max-begin): {}""".format(tracemalloc.peaked ) )
accelerator.print(
"""Total Peak Memory consumed during the train (max): {}""".format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
SCREAMING_SNAKE_CASE : Any = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f'''epoch-{epoch}'''] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """peak_memory_utilization.json""" ) , """w""" ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=lowerCamelCase_ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowerCamelCase_ , )
parser.add_argument(
"""--output_dir""" , type=lowerCamelCase_ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--peak_memory_upper_bound""" , type=lowerCamelCase_ , default=lowerCamelCase_ , help="""The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.""" , )
parser.add_argument(
"""--n_train""" , type=lowerCamelCase_ , default=3_20 , help="""Number of training examples to use.""" , )
parser.add_argument(
"""--n_val""" , type=lowerCamelCase_ , default=1_60 , help="""Number of validation examples to use.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowerCamelCase_ , default=1 , help="""Number of train epochs.""" , )
    args = parser.parse_args()
    config = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 323 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 323 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--onnx_model_path""",
default=None,
type=str,
required=True,
help="""Path to ONNX model: """,
)
parser.add_argument(
"""--output_dir""",
default=None,
type=str,
required=True,
help="""The output directory where the model checkpoints and predictions will be written.""",
)
# Other parameters
parser.add_argument(
"""--tokenizer_name""",
default="""""",
type=str,
required=True,
help="""Pretrained tokenizer name or path if not the same as model_name""",
)
parser.add_argument(
"""--version_2_with_negative""",
action="""store_true""",
help="""If true, the SQuAD examples contain some that do not have an answer.""",
)
parser.add_argument(
"""--null_score_diff_threshold""",
type=float,
default=0.0,
help="""If null_score - best_non_null is greater than the threshold predict null.""",
)
parser.add_argument(
"""--max_seq_length""",
default=384,
type=int,
help=(
"""The maximum total input sequence length after WordPiece tokenization. Sequences """
"""longer than this will be truncated, and sequences shorter than this will be padded."""
),
)
parser.add_argument(
"""--doc_stride""",
default=128,
type=int,
help="""When splitting up a long document into chunks, how much stride to take between chunks.""",
)
parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""")
parser.add_argument(
"""--n_best_size""",
default=20,
type=int,
help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""",
)
parser.add_argument(
"""--max_answer_length""",
default=30,
type=int,
help=(
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
),
)
parser.add_argument("""--seed""", type=int, default=42, help="""random seed for initialization""")
parser.add_argument(
"""--dataset_name""",
type=str,
default=None,
required=True,
help="""The name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--dataset_config_name""",
type=str,
default=None,
help="""The configuration name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--preprocessing_num_workers""", type=int, default=4, help="""A csv or a json file containing the training data."""
)
parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""")
parser.add_argument(
"""--fp16""",
action="""store_true""",
help="""Whether to use 16-bit (mixed) precision instead of 32-bit""",
)
parser.add_argument(
"""--int8""",
action="""store_true""",
help="""Whether to use INT8""",
)
__UpperCAmelCase = parser.parse_args()
if args.tokenizer_name:
__UpperCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name."""
)
logger.info("""Training/evaluation parameters %s""", args)
__UpperCAmelCase = args.per_device_eval_batch_size
__UpperCAmelCase = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__UpperCAmelCase = True
__UpperCAmelCase = """temp_engine/bert-fp32.engine"""
if args.fpaa:
__UpperCAmelCase = """temp_engine/bert-fp16.engine"""
if args.inta:
__UpperCAmelCase = """temp_engine/bert-int8.engine"""
# import ONNX file
if not os.path.exists("""temp_engine"""):
os.makedirs("""temp_engine""")
__UpperCAmelCase = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, """rb""") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__UpperCAmelCase = [network.get_input(i) for i in range(network.num_inputs)]
__UpperCAmelCase = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__UpperCAmelCase = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__UpperCAmelCase = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__UpperCAmelCase = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, """wb""") as f:
f.write(engine.serialize())
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = np.asarray(inputs["""input_ids"""] , dtype=np.intaa )
SCREAMING_SNAKE_CASE : Optional[int] = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCamelCase_ )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCamelCase_ )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCamelCase_ )
# start time
SCREAMING_SNAKE_CASE : List[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(d_inp ) for d_inp in d_inputs] + [int(lowerCamelCase_ ), int(lowerCamelCase_ )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
cuda.memcpy_dtoh_async(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Synchronize the stream and take time
stream.synchronize()
# end time
SCREAMING_SNAKE_CASE : int = time.time()
SCREAMING_SNAKE_CASE : Optional[Any] = end_time - start_time
SCREAMING_SNAKE_CASE : List[Any] = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
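# A note on the assumptions baked into model_infer above: the engine is
# expected to expose three input bindings (input_ids, attention_mask,
# token_type_ids) followed by two output bindings for the start/end logits --
# this matches the pagelocked buffers allocated from binding shapes 3 and 4
# further below. All copies and the execution are enqueued on a single CUDA
# stream, so stream.synchronize() is what makes the timing measurement valid.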
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__UpperCAmelCase = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__UpperCAmelCase = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("""Evaluation requires a dataset name""")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
__UpperCAmelCase = raw_datasets["""validation"""].column_names
__UpperCAmelCase = """question""" if """question""" in column_names else column_names[0]
__UpperCAmelCase = """context""" if """context""" in column_names else column_names[1]
__UpperCAmelCase = """answers""" if """answers""" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__UpperCAmelCase = tokenizer.padding_side == """right"""
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
__UpperCAmelCase = min(args.max_seq_length, tokenizer.model_max_length)
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit with the context of the previous feature.
SCREAMING_SNAKE_CASE : Any = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowerCamelCase_ , stride=args.doc_stride , return_overflowing_tokens=lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , padding="""max_length""" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
SCREAMING_SNAKE_CASE : Optional[Any] = tokenized_examples.pop("""overflow_to_sample_mapping""" )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
SCREAMING_SNAKE_CASE : Tuple = []
for i in range(len(tokenized_examples["""input_ids"""] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
SCREAMING_SNAKE_CASE : List[str] = tokenized_examples.sequence_ids(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
SCREAMING_SNAKE_CASE : Tuple = sample_mapping[i]
tokenized_examples["example_id"].append(examples["""id"""][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
SCREAMING_SNAKE_CASE : Tuple = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] )
]
return tokenized_examples
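# For intuition: with the defaults above (max_seq_length=384, doc_stride=128),
# a context that tokenizes to roughly 600 tokens produces two overlapping
# features; overflow_to_sample_mapping ties both back to the same example id,
# and the masked offset_mapping lets post-processing map logits back to
# character spans in the original context. (Numbers are illustrative only.)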
__UpperCAmelCase = raw_datasets["""validation"""]
# Validation Feature Creation
__UpperCAmelCase = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="""Running tokenizer on validation dataset""",
)
__UpperCAmelCase = default_data_collator
__UpperCAmelCase = eval_dataset.remove_columns(["""example_id""", """offset_mapping"""])
__UpperCAmelCase = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_="eval" ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = postprocess_qa_predictions(
examples=lowerCamelCase_ , features=lowerCamelCase_ , predictions=lowerCamelCase_ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowerCamelCase_ , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
SCREAMING_SNAKE_CASE : List[str] = [
{"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
]
else:
SCREAMING_SNAKE_CASE : Tuple = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
SCREAMING_SNAKE_CASE : Dict = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=lowerCamelCase_ , label_ids=lowerCamelCase_ )
__UpperCAmelCase = load_metric("""squad_v2""" if args.version_2_with_negative else """squad""")
# Evaluation!
logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path)
with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def __A ( lowerCamelCase_ ):
"""simple docstring"""
return trt.volume(engine.get_binding_shape(lowerCamelCase_ ) ) * engine.get_binding_dtype(lowerCamelCase_ ).itemsize
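# e.g. with the default INPUT_SHAPE of (8, 384) and an int32 input binding
# this is 8 * 384 * 4 = 12288 bytes per buffer.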
# Allocate device memory for inputs and outputs.
__UpperCAmelCase = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__UpperCAmelCase = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__UpperCAmelCase = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__UpperCAmelCase = cuda.mem_alloc(h_outputa.nbytes)
__UpperCAmelCase = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__UpperCAmelCase = cuda.Stream()
# Evaluation
logger.info("""***** Running Evaluation *****""")
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
__UpperCAmelCase = 0.0
__UpperCAmelCase = 0
__UpperCAmelCase = timeit.default_timer()
__UpperCAmelCase = None
for step, batch in enumerate(eval_dataloader):
__UpperCAmelCase , __UpperCAmelCase = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__UpperCAmelCase , __UpperCAmelCase = outputs
__UpperCAmelCase = torch.tensor(start_logits)
__UpperCAmelCase = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
__UpperCAmelCase = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__UpperCAmelCase = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__UpperCAmelCase = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__UpperCAmelCase = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__UpperCAmelCase = nested_truncate(all_preds, len(eval_dataset))
__UpperCAmelCase = timeit.default_timer() - start_time
logger.info(""" Evaluation done in total %f secs (%f sec per example)""", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("""Average Inference Time = {:.3f} ms""".format(total_time * 1000 / niter))
logger.info("""Total Inference Time = {:.3f} ms""".format(total_time * 1000))
logger.info("""Total Number of Inference = %d""", niter)
__UpperCAmelCase = post_processing_function(eval_examples, eval_dataset, all_preds)
__UpperCAmelCase = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''')
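# With the SQuAD defaults this prints the dict returned by the squad /
# squad_v2 metric, i.e. exact-match and F1 scores (plus the additional
# no-answer breakdown fields when --version_2_with_negative is set).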
| 323 |
'''simple docstring'''
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return number | (1 << position)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return number & ~(1 << position)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return number ^ (1 << position)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return ((number >> position) & 1) == 1
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return int((number & (1 << position)) != 0 )
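# Worked examples, assuming the five helpers above correspond (in order) to
# set_bit, clear_bit, flip_bit, is_bit_set and get_bit in the original
# source (here they all share the obfuscated name __A, so later definitions
# shadow earlier ones):
#   set_bit(0b1101, 1)     -> 0b1111 (15)
#   clear_bit(0b1101, 2)   -> 0b1001 (9)
#   flip_bit(0b1101, 0)    -> 0b1100 (12)
#   is_bit_set(0b1101, 3)  -> True
#   get_bit(0b1101, 1)     -> 0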
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323 | 1 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __A ( lowerCamelCase_ ):
"""simple docstring"""
random.seed(lowerCamelCase_ )
np.random.seed(lowerCamelCase_ )
torch.manual_seed(lowerCamelCase_ )
torch.cuda.manual_seed_all(lowerCamelCase_ )
# ^^ safe to call this function even if cuda is not available
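# Usage sketch: call the helper once at program start (e.g. with seed 42) so
# that subsequent draws from random, numpy and torch -- including CUDA, when
# available -- are reproducible across runs.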
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCamelCase_ : Iterable[torch.nn.Parameter] , lowerCamelCase_ : float = 0.9_999 , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : int = 0 , lowerCamelCase_ : bool = False , lowerCamelCase_ : Union[float, int] = 1.0 , lowerCamelCase_ : Union[float, int] = 2 / 3 , lowerCamelCase_ : Optional[Any] = None , lowerCamelCase_ : Dict[str, Any] = None , **lowerCamelCase_ : List[str] , ):
'''simple docstring'''
if isinstance(lowerCamelCase_ , torch.nn.Module ):
SCREAMING_SNAKE_CASE : Any = (
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage`""" , """1.0.0""" , lowerCamelCase_ , standard_warn=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Optional[Any] = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
SCREAMING_SNAKE_CASE : List[str] = True
if kwargs.get("""max_value""" , lowerCamelCase_ ) is not None:
SCREAMING_SNAKE_CASE : Optional[int] = """The `max_value` argument is deprecated. Please use `decay` instead."""
deprecate("""max_value""" , """1.0.0""" , lowerCamelCase_ , standard_warn=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = kwargs["""max_value"""]
if kwargs.get("""min_value""" , lowerCamelCase_ ) is not None:
SCREAMING_SNAKE_CASE : Any = """The `min_value` argument is deprecated. Please use `min_decay` instead."""
deprecate("""min_value""" , """1.0.0""" , lowerCamelCase_ , standard_warn=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = kwargs["""min_value"""]
SCREAMING_SNAKE_CASE : List[str] = list(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = [p.clone().detach() for p in parameters]
if kwargs.get("""device""" , lowerCamelCase_ ) is not None:
SCREAMING_SNAKE_CASE : Optional[int] = """The `device` argument is deprecated. Please use `to` instead."""
deprecate("""device""" , """1.0.0""" , lowerCamelCase_ , standard_warn=lowerCamelCase_ )
self.to(device=kwargs["""device"""] )
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : Optional[Any] = decay
SCREAMING_SNAKE_CASE : List[Any] = min_decay
SCREAMING_SNAKE_CASE : Dict = update_after_step
SCREAMING_SNAKE_CASE : List[Any] = use_ema_warmup
SCREAMING_SNAKE_CASE : List[str] = inv_gamma
SCREAMING_SNAKE_CASE : List[Any] = power
SCREAMING_SNAKE_CASE : Tuple = 0
SCREAMING_SNAKE_CASE : List[Any] = None # set in `step()`
SCREAMING_SNAKE_CASE : str = model_cls
SCREAMING_SNAKE_CASE : Tuple = model_config
@classmethod
def lowerCamelCase_ ( cls : Any , lowerCamelCase_ : Any , lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = model_cls.load_config(lowerCamelCase_ , return_unused_kwargs=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = model_cls.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = cls(model.parameters() , model_cls=lowerCamelCase_ , model_config=model.config )
ema_model.load_state_dict(lowerCamelCase_ )
return ema_model
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[Any] ):
'''simple docstring'''
if self.model_cls is None:
raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" )
if self.model_config is None:
raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" )
SCREAMING_SNAKE_CASE : Any = self.model_cls.from_config(self.model_config )
SCREAMING_SNAKE_CASE : Optional[Any] = self.state_dict()
state_dict.pop("""shadow_params""" , lowerCamelCase_ )
model.register_to_config(**lowerCamelCase_ )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
SCREAMING_SNAKE_CASE : Union[str, Any] = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
SCREAMING_SNAKE_CASE : Dict = (1 + step) / (10 + step)
SCREAMING_SNAKE_CASE : List[Any] = min(lowerCamelCase_ , self.decay )
# make sure decay is not smaller than min_decay
SCREAMING_SNAKE_CASE : Union[str, Any] = max(lowerCamelCase_ , self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Iterable[torch.nn.Parameter] ):
'''simple docstring'''
if isinstance(lowerCamelCase_ , torch.nn.Module ):
SCREAMING_SNAKE_CASE : Optional[int] = (
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , lowerCamelCase_ , standard_warn=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : str = parameters.parameters()
SCREAMING_SNAKE_CASE : List[str] = list(lowerCamelCase_ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
SCREAMING_SNAKE_CASE : Optional[int] = self.get_decay(self.optimization_step )
SCREAMING_SNAKE_CASE : Tuple = decay
SCREAMING_SNAKE_CASE : Tuple = 1 - decay
SCREAMING_SNAKE_CASE : Dict = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , lowerCamelCase_ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
SCREAMING_SNAKE_CASE : int = deepspeed.zero.GatheredParameters(lowerCamelCase_ , modifier_rank=lowerCamelCase_ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowerCamelCase_ )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Iterable[torch.nn.Parameter] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = list(lowerCamelCase_ )
for s_param, param in zip(self.shadow_params , lowerCamelCase_ ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Dict=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = [
p.to(device=lowerCamelCase_ , dtype=lowerCamelCase_ ) if p.is_floating_point() else p.to(device=lowerCamelCase_ )
for p in self.shadow_params
]
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Iterable[torch.nn.Parameter] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Iterable[torch.nn.Parameter] ):
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" )
for c_param, param in zip(self.temp_stored_params , lowerCamelCase_ ):
param.data.copy_(c_param.data )
# Better memory-wise.
SCREAMING_SNAKE_CASE : int = None
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = copy.deepcopy(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = state_dict.get("""decay""" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("""Decay must be between 0 and 1""" )
SCREAMING_SNAKE_CASE : int = state_dict.get("""min_decay""" , self.min_decay )
if not isinstance(self.min_decay , lowerCamelCase_ ):
raise ValueError("""Invalid min_decay""" )
SCREAMING_SNAKE_CASE : List[Any] = state_dict.get("""optimization_step""" , self.optimization_step )
if not isinstance(self.optimization_step , lowerCamelCase_ ):
raise ValueError("""Invalid optimization_step""" )
SCREAMING_SNAKE_CASE : Dict = state_dict.get("""update_after_step""" , self.update_after_step )
if not isinstance(self.update_after_step , lowerCamelCase_ ):
raise ValueError("""Invalid update_after_step""" )
SCREAMING_SNAKE_CASE : Optional[int] = state_dict.get("""use_ema_warmup""" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , lowerCamelCase_ ):
raise ValueError("""Invalid use_ema_warmup""" )
SCREAMING_SNAKE_CASE : int = state_dict.get("""inv_gamma""" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("""Invalid inv_gamma""" )
SCREAMING_SNAKE_CASE : Optional[int] = state_dict.get("""power""" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("""Invalid power""" )
SCREAMING_SNAKE_CASE : Tuple = state_dict.get("""shadow_params""" , lowerCamelCase_ )
if shadow_params is not None:
SCREAMING_SNAKE_CASE : Tuple = shadow_params
if not isinstance(self.shadow_params , lowerCamelCase_ ):
raise ValueError("""shadow_params must be a list""" )
if not all(isinstance(lowerCamelCase_ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("""shadow_params must all be Tensors""" )
| 323 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str=14 , lowerCamelCase_ : Optional[Any]=7 , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=True , lowerCamelCase_ : str=False , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : int=99 , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : int=4 , lowerCamelCase_ : List[Any]=4 , lowerCamelCase_ : List[str]=4 , lowerCamelCase_ : Union[str, Any]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Union[str, Any]=0.1 , lowerCamelCase_ : List[str]=5_12 , lowerCamelCase_ : Union[str, Any]=0.02 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = parent
SCREAMING_SNAKE_CASE : Optional[int] = batch_size
SCREAMING_SNAKE_CASE : Any = seq_length
SCREAMING_SNAKE_CASE : List[str] = is_training
SCREAMING_SNAKE_CASE : Optional[int] = use_input_mask
SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : List[Any] = rotary_dim
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Dict = vocab_size - 1
SCREAMING_SNAKE_CASE : str = vocab_size - 1
SCREAMING_SNAKE_CASE : List[Any] = vocab_size - 1
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : List[str] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = config_and_inputs
SCREAMING_SNAKE_CASE : Tuple = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = 20
SCREAMING_SNAKE_CASE : Any = model_class_name(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model.init_cache(input_ids.shape[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
SCREAMING_SNAKE_CASE : Optional[int] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE : Any = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
SCREAMING_SNAKE_CASE : str = model(
input_ids[:, -1:] , attention_mask=lowerCamelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = 20
SCREAMING_SNAKE_CASE : Dict = model_class_name(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
SCREAMING_SNAKE_CASE : str = model.init_cache(input_ids.shape[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE : Any = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
SCREAMING_SNAKE_CASE : Dict = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
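# Both cache checks above follow the same pattern: run a prefix pass that
# fills `past_key_values`, then a single-token pass that reuses the cache,
# and require the last-position logits to match a plain full forward within
# 1e-3. Agreement validates the Flax cache plumbing (including, in the second
# variant, an attention mask padded out to the maximum decoder length).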
@require_flax
class UpperCamelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
SCREAMING_SNAKE_CASE__ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxGPTJModelTester(self )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
@tooslow
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=lowerCamelCase_ , truncation=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : Optional[Any] = model.config.eos_token_id
SCREAMING_SNAKE_CASE : str = jax.jit(model.generate )
SCREAMING_SNAKE_CASE : str = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences
SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@is_pt_flax_cross_test
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE : str = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape
SCREAMING_SNAKE_CASE : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Optional[int] = 1
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
SCREAMING_SNAKE_CASE : Optional[int] = pt_model_class(lowerCamelCase_ ).eval()
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = fx_state
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = pt_model(**lowerCamelCase_ ).to_tuple()
SCREAMING_SNAKE_CASE : Any = fx_model(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = model_class.from_pretrained(lowerCamelCase_ , from_pt=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = fx_model_loaded(**lowerCamelCase_ ).to_tuple()
self.assertEqual(
len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = pt_model_class(lowerCamelCase_ ).eval()
SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : List[Any] = load_flax_weights_in_pytorch_model(lowerCamelCase_ , fx_model.params )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
SCREAMING_SNAKE_CASE : Dict = 1
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : Tuple = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = pt_model(**lowerCamelCase_ ).to_tuple()
SCREAMING_SNAKE_CASE : Optional[Any] = fx_model(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = pt_model_class.from_pretrained(lowerCamelCase_ , from_flax=lowerCamelCase_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = pt_model_loaded(**lowerCamelCase_ ).to_tuple()
self.assertEqual(
len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" )
SCREAMING_SNAKE_CASE : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase_ )
| 323 | 1 |
'''simple docstring'''
def __A ( lowerCamelCase_ = 10**9 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = 1
SCREAMING_SNAKE_CASE : str = 2
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Any = 0
SCREAMING_SNAKE_CASE : Optional[Any] = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
SCREAMING_SNAKE_CASE : str = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f'''{solution() = }''')
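# This is Project Euler problem 94: sum the perimeters of all "almost
# equilateral" triangles (sides a, a, a±1) with integral area and perimeter
# not exceeding 10**9. The recurrence enumerates them directly: the first
# few perimeters are 16 (triangle 5,5,6), 50 (17,17,16) and 196 (65,65,66).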
| 323 |
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase__ ( lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [R'''h\.\d+\.attn\.bias''', R'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : int = 5_02_57 , lowerCamelCase_ : int = 10_24 , lowerCamelCase_ : int = 7_68 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : str = "gelu_new" , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 1e-5 , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[int] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'''
f''' `n_embd`: {n_embd} are not equal.''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = prefix_inner_dim
SCREAMING_SNAKE_CASE : List[str] = prefix_hidden_dim
SCREAMING_SNAKE_CASE : Tuple = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
SCREAMING_SNAKE_CASE : str = (
nn.Linear(self.prefix_hidden_dim , lowerCamelCase_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
SCREAMING_SNAKE_CASE : Any = GPTaConfig(
vocab_size=lowerCamelCase_ , n_positions=lowerCamelCase_ , n_embd=lowerCamelCase_ , n_layer=lowerCamelCase_ , n_head=lowerCamelCase_ , n_inner=lowerCamelCase_ , activation_function=lowerCamelCase_ , resid_pdrop=lowerCamelCase_ , embd_pdrop=lowerCamelCase_ , attn_pdrop=lowerCamelCase_ , layer_norm_epsilon=lowerCamelCase_ , initializer_range=lowerCamelCase_ , scale_attn_weights=lowerCamelCase_ , use_cache=lowerCamelCase_ , scale_attn_by_inverse_layer_idx=lowerCamelCase_ , reorder_and_upcast_attn=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = GPTaLMHeadModel(lowerCamelCase_ )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : Optional[torch.Tensor] = None , lowerCamelCase_ : Optional[torch.Tensor] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.transformer.transformer.wte(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.encode_prefix(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self.decode_prefix(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
SCREAMING_SNAKE_CASE : Dict = torch.cat((dummy_token, input_ids) , dim=1 )
SCREAMING_SNAKE_CASE : str = self.transformer(inputs_embeds=lowerCamelCase_ , labels=lowerCamelCase_ , attention_mask=lowerCamelCase_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : torch.device ):
'''simple docstring'''
return torch.zeros(lowerCamelCase_ , self.prefix_length , dtype=torch.intaa , device=lowerCamelCase_ )
def lowerCamelCase_ ( self : str , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
return self.encode_prefix(lowerCamelCase_ )
@torch.no_grad()
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = torch.split(lowerCamelCase_ , 1 , dim=0 )
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : Tuple = []
for feature in features:
SCREAMING_SNAKE_CASE : Optional[int] = self.decode_prefix(feature.to(lowerCamelCase_ ) ) # back to the clip feature
# Only support beam search for now
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.generate_beam(
input_embeds=lowerCamelCase_ , device=lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = torch.stack(lowerCamelCase_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : int=None , lowerCamelCase_ : int = 5 , lowerCamelCase_ : int = 67 , lowerCamelCase_ : float = 1.0 , lowerCamelCase_ : Optional[int] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = eos_token_id
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.int )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.bool )
if input_embeds is not None:
SCREAMING_SNAKE_CASE : Dict = input_embeds
else:
SCREAMING_SNAKE_CASE : Dict = self.transformer.transformer.wte(lowerCamelCase_ )
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Optional[int] = self.transformer(inputs_embeds=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = outputs.logits
SCREAMING_SNAKE_CASE : Optional[int] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
SCREAMING_SNAKE_CASE : Any = logits.softmax(-1 ).log()
if scores is None:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = logits.topk(lowerCamelCase_ , -1 )
SCREAMING_SNAKE_CASE : Optional[Any] = generated.expand(lowerCamelCase_ , *generated.shape[1:] )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
SCREAMING_SNAKE_CASE : List[Any] = next_tokens
else:
SCREAMING_SNAKE_CASE : Dict = tokens.expand(lowerCamelCase_ , *tokens.shape[1:] )
SCREAMING_SNAKE_CASE : str = torch.cat((tokens, next_tokens) , dim=1 )
else:
SCREAMING_SNAKE_CASE : Tuple = -float(np.inf )
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Dict = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
SCREAMING_SNAKE_CASE : List[str] = scores_sum / seq_lengths[:, None]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = scores_sum_average.view(-1 ).topk(lowerCamelCase_ , -1 )
SCREAMING_SNAKE_CASE : str = next_tokens // scores_sum.shape[1]
SCREAMING_SNAKE_CASE : Tuple = seq_lengths[next_tokens_source]
SCREAMING_SNAKE_CASE : int = next_tokens % scores_sum.shape[1]
SCREAMING_SNAKE_CASE : Dict = next_tokens.unsqueeze(1 )
SCREAMING_SNAKE_CASE : Dict = tokens[next_tokens_source]
SCREAMING_SNAKE_CASE : Any = torch.cat((tokens, next_tokens) , dim=1 )
SCREAMING_SNAKE_CASE : List[str] = generated[next_tokens_source]
SCREAMING_SNAKE_CASE : Optional[Any] = scores_sum_average * seq_lengths
SCREAMING_SNAKE_CASE : Any = is_stopped[next_tokens_source]
SCREAMING_SNAKE_CASE : Dict = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
SCREAMING_SNAKE_CASE : str = torch.cat((generated, next_token_embed) , dim=1 )
SCREAMING_SNAKE_CASE : Dict = is_stopped + next_tokens.eq(lowerCamelCase_ ).squeeze()
if is_stopped.all():
break
SCREAMING_SNAKE_CASE : int = scores / seq_lengths
SCREAMING_SNAKE_CASE : Dict = scores.argsort(descending=lowerCamelCase_ )
# tokens tensors are already padded to max_seq_length
SCREAMING_SNAKE_CASE : Union[str, Any] = [tokens[i] for i in order]
SCREAMING_SNAKE_CASE : Dict = torch.stack(lowerCamelCase_ , dim=0 )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
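# Note on the beam bookkeeping in generate_beam above: `scores` holds
# accumulated log-probabilities, dividing by `seq_lengths` length-normalizes
# them so shorter finished sequences are not unfairly favored, and the
# integer division / modulo by scores_sum.shape[1] recover (source beam,
# token id) pairs from the top-k over the flattened beam-by-vocab scores.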
| 323 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
__UpperCAmelCase = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
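# With this layout, importing the package is cheap: the torch-backed symbols
# listed in _import_structure are only materialized on first attribute
# access. A typical entry point is
#   from transformers import TrOCRProcessor, TrOCRForCausalLM
# which resolves through the _LazyModule above.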
| 323 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''git_vision_model'''
def __init__( self : int , lowerCamelCase_ : Dict=7_68 , lowerCamelCase_ : Tuple=30_72 , lowerCamelCase_ : List[Any]=12 , lowerCamelCase_ : Tuple=12 , lowerCamelCase_ : int=3 , lowerCamelCase_ : List[str]=2_24 , lowerCamelCase_ : Optional[Any]=16 , lowerCamelCase_ : Optional[Any]="quick_gelu" , lowerCamelCase_ : List[Any]=1e-5 , lowerCamelCase_ : List[Any]=0.0 , lowerCamelCase_ : Optional[Any]=0.02 , **lowerCamelCase_ : str , ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : int = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE : Optional[int] = image_size
SCREAMING_SNAKE_CASE : List[str] = initializer_range
SCREAMING_SNAKE_CASE : str = attention_dropout
SCREAMING_SNAKE_CASE : Any = layer_norm_eps
SCREAMING_SNAKE_CASE : List[str] = hidden_act
@classmethod
def lowerCamelCase_ ( cls : List[str] , lowerCamelCase_ : Union[str, os.PathLike] , **lowerCamelCase_ : int ):
'''simple docstring'''
cls._set_token_in_kwargs(lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = cls.get_config_dict(lowerCamelCase_ , **lowerCamelCase_ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
SCREAMING_SNAKE_CASE : Optional[Any] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowerCamelCase_ , **lowerCamelCase_ )
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''git'''
def __init__( self : List[str] , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Tuple=3_05_22 , lowerCamelCase_ : Optional[Any]=7_68 , lowerCamelCase_ : Any=6 , lowerCamelCase_ : List[str]=12 , lowerCamelCase_ : List[str]=30_72 , lowerCamelCase_ : Union[str, Any]="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Dict=0.1 , lowerCamelCase_ : Optional[int]=10_24 , lowerCamelCase_ : int=0.02 , lowerCamelCase_ : Optional[int]=1e-12 , lowerCamelCase_ : Union[str, Any]=0 , lowerCamelCase_ : Optional[Any]="absolute" , lowerCamelCase_ : Dict=True , lowerCamelCase_ : Optional[Any]=False , lowerCamelCase_ : Optional[int]=1_01 , lowerCamelCase_ : Optional[Any]=1_02 , lowerCamelCase_ : List[str]=None , **lowerCamelCase_ : str , ):
'''simple docstring'''
super().__init__(bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , pad_token_id=lowerCamelCase_ , **lowerCamelCase_ )
if vision_config is None:
SCREAMING_SNAKE_CASE : Any = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = GitVisionConfig(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Tuple = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : Dict = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE : Union[str, Any] = position_embedding_type
SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache
SCREAMING_SNAKE_CASE : List[str] = tie_word_embeddings
SCREAMING_SNAKE_CASE : int = num_image_with_embedding
SCREAMING_SNAKE_CASE : Optional[Any] = bos_token_id
SCREAMING_SNAKE_CASE : str = eos_token_id
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : Tuple = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE : Any = self.__class__.model_type
return output
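# A minimal usage sketch (class names per the original transformers source,
# GitVisionConfig and GitConfig; both are obfuscated to UpperCamelCase__
# here):
#   vision = GitVisionConfig(image_size=224, patch_size=16)
#   config = GitConfig(vision_config=vision.to_dict())
#   config.to_dict()["vision_config"]["patch_size"]   # -> 16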
| 323 | 1 |
'''simple docstring'''
def __A ( lowerCamelCase_ ):
"""simple docstring"""
if p < 2:
raise ValueError("""p should not be less than 2!""" )
elif p == 2:
return True
SCREAMING_SNAKE_CASE : Optional[Any] = 4
SCREAMING_SNAKE_CASE : List[str] = (1 << p) - 1
for _ in range(p - 2 ):
SCREAMING_SNAKE_CASE : List[Any] = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
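# Expected output: True then False -- 2**7 - 1 = 127 is prime, while
# 2**11 - 1 = 2047 = 23 * 89 is composite. (Note the call sites use the
# original name lucas_lehmer_test, while the definition above is obfuscated
# to __A.)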
| 323 |
'''simple docstring'''
from manim import *
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE : List[str] = Rectangle(height=0.25 , width=0.25 )
SCREAMING_SNAKE_CASE : Optional[int] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : List[Any] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : str = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Tuple = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : List[Any] = Text("""CPU""" , font_size=24 )
SCREAMING_SNAKE_CASE : Any = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Optional[Any] = Text("""GPU""" , font_size=24 )
SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : List[Any] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Text("""Model""" , font_size=24 )
SCREAMING_SNAKE_CASE : List[str] = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for i, rect in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = fill.copy().set_fill(lowerCamelCase_ , opacity=0.8 )
target.move_to(lowerCamelCase_ )
model_arr.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(lowerCamelCase_ )
self.add(*lowerCamelCase_ , *lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Tuple = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Tuple = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Optional[int] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Dict = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : List[Any] = Text("""Disk""" , font_size=24 )
SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
disk.move_to([-4, -1.25, 0] )
self.add(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE : Optional[Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(lowerCamelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = MarkupText(
f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = Square(0.3 )
input.set_fill(lowerCamelCase_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , lowerCamelCase_ , buff=0.5 )
self.play(Write(lowerCamelCase_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=lowerCamelCase_ , buff=0.02 )
self.play(MoveToTarget(lowerCamelCase_ ) )
self.play(FadeOut(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : int = Arrow(start=lowerCamelCase_ , end=lowerCamelCase_ , color=lowerCamelCase_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , lowerCamelCase_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
SCREAMING_SNAKE_CASE : Optional[int] = MarkupText(
f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ , run_time=3 ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.02}
self.play(
Write(lowerCamelCase_ ) , Circumscribe(model_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_cpu_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
SCREAMING_SNAKE_CASE : Optional[int] = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , lowerCamelCase_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
SCREAMING_SNAKE_CASE : Any = AnimationGroup(
FadeOut(lowerCamelCase_ , run_time=0.5 ) , MoveToTarget(lowerCamelCase_ , run_time=0.5 ) , FadeIn(lowerCamelCase_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(lowerCamelCase_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
SCREAMING_SNAKE_CASE : Optional[Any] = 0.7
self.play(
Circumscribe(model_arr[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_arr[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = a_c
SCREAMING_SNAKE_CASE : Optional[Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(lowerCamelCase_ ) , FadeOut(lowerCamelCase_ , run_time=0.5 ) , )
SCREAMING_SNAKE_CASE : int = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ , run_time=3 ) , MoveToTarget(lowerCamelCase_ ) )
self.wait()
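
# A hedged, minimal sketch of the basic unit the animation above builds its
# CPU/GPU/Disk groups from (scene name is hypothetical): a row of "memory
# slot" rectangles with a device label underneath.
from manim import DOWN, RIGHT, Rectangle, Scene, Text, VGroup


class MemoryRowSketch(Scene):
    def construct(self):
        # six slots side by side with no gap, labeled as a device
        slots = VGroup(*[Rectangle(height=0.5, width=0.5) for _ in range(6)])
        slots.arrange(RIGHT, buff=0)
        label = Text("CPU", font_size=24)
        label.next_to(slots, DOWN)
        self.add(slots, label)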
| 323 | 1 |
'''simple docstring'''
from math import factorial
def combinations(n: int, k: int) -> int:
    """Number of ways to choose k items from n, ignoring order."""
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
f'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
"""If a class of 40 students must be arranged into groups of""",
f'''4 for group projects, there are {combinations(40, 4)} ways''',
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
f'''are {combinations(10, 3)} ways that first, second and''',
"""third place can be awarded.""",
)
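
# Sanity-check sketch (assumes the combinations() above): Python 3.8+ ships
# math.comb, which must agree with the factorial formula for valid inputs.
from math import comb

for n, k in [(52, 5), (40, 4), (10, 3)]:
    assert combinations(n, k) == comb(n, k)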
| 323 |
'''simple docstring'''
from __future__ import annotations
__UpperCAmelCase = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph:
    """Breadth-first search over an adjacency-list graph from a fixed source."""

    def __init__(self, graph: dict[str, list[str]], source_vertex: str):
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self):
        """Populate self.parent by exploring the graph level by level."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Walk the parent map back from target_vertex to the source."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            raise ValueError(
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
__UpperCAmelCase = Graph(graph, """G""")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
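
# The recursive shortest_path above can hit Python's recursion limit on very
# deep parent chains; an equivalent iterative sketch over the same parent map
# (note the "Foo" call above raises ValueError by design):
def shortest_path_iterative(g: Graph, target_vertex: str) -> str:
    path = [target_vertex]
    while path[-1] != g.source_vertex:
        parent = g.parent.get(path[-1])
        if parent is None:
            raise ValueError(f"No path from vertex: {g.source_vertex} to vertex: {target_vertex}")
        path.append(parent)
    return "->".join(reversed(path))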
| 323 | 1 |
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Original erf-based Gaussian Error Linear Unit."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Smoother tanh approximation of GELU."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044_715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044_715, x.dtype)
    coeff2 = tf.cast(0.7_978_845_608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU clipped to the range [-10, 10], useful for quantization."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split the tensor in two and gate one half with the other."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
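
# Quick numeric sanity check (a sketch using the functions above): the tanh
# and sigmoid approximations should track the exact erf-based gelu closely.
if __name__ == "__main__":
    x = tf.linspace(-3.0, 3.0, 7)
    print("gelu      :", _gelu(x).numpy())
    print("gelu_new  :", _gelu_new(x).numpy())
    print("quick_gelu:", quick_gelu(x).numpy())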
| 323 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    """A grid cell with A* costs: g (path so far), h (heuristic), f = g + h."""

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    """Plain A* over the module-level grid, using a sorted list as the open set."""

    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """In-bounds, non-obstacle neighbours of parent, one step away."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Walk the parent chain back to the start, returning (y, x) positions."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Runs two A* searches toward each other and joins the paths where they meet."""

    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            # each search now aims at the other's current frontier node
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()  # run the search so the timing below measures it
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
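
# Design note, as a runnable sketch (not part of the original file): sorting
# the whole open list on every iteration costs O(n log n) per pop. Because
# Node orders by f_cost via __lt__, a binary heap does the same job in
# O(log n) per operation:
import heapq


def heap_demo() -> None:
    open_heap = [Node(0, 0, 6, 6, 0, None), Node(3, 3, 6, 6, 5, None)]
    heapq.heapify(open_heap)
    best = heapq.heappop(open_heap)  # node with the smallest f_cost
    print((best.pos_y, best.pos_x), best.f_cost)


if __name__ == "__main__":
    heap_demo()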
| 323 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
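
# Hypothetical helpers (a sketch, not part of the config file above) showing
# how the width/depth coefficients are typically consumed in EfficientNet-style
# compound scaling; the names and the 10% rounding guard follow the usual recipe.
import math


def round_filters(config: EfficientNetConfig, num_channels: int) -> int:
    """Scale a channel count by width_coefficient, snapped to depth_divisor."""
    divisor = config.depth_divisor
    num_channels *= config.width_coefficient
    new_dim = max(divisor, int(num_channels + divisor / 2) // divisor * divisor)
    # make sure that rounding down never drops below 90% of the scaled value
    if new_dim < 0.9 * num_channels:
        new_dim += divisor
    return int(new_dim)


def round_repeats(config: EfficientNetConfig, repeats: int) -> int:
    """Scale a block-repeat count by depth_coefficient, rounding up."""
    return int(math.ceil(config.depth_coefficient * repeats))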
| 323 | 1 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class DatasetScriptsTest(TestCase):  # class name reconstructed; the base is unittest.TestCase
    def _no_encoding_on_file_open(self, filepath: str):
        r"""Find an `open(...)` call that neither reads binary nor passes an encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        r"""Find a print statement that is not commented out or inside a string/docstring."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 323 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(lowercase_ )
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Tuple , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : Tuple ):
'''simple docstring'''
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Optional[int]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = {}
SCREAMING_SNAKE_CASE : List[Any] = {}
if prompt is not None:
SCREAMING_SNAKE_CASE : List[Any] = prompt
if generate_kwargs is not None:
SCREAMING_SNAKE_CASE : Optional[int] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
SCREAMING_SNAKE_CASE : Optional[Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Optional[Any] , lowerCamelCase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCamelCase_ : Any ):
'''simple docstring'''
return super().__call__(lowerCamelCase_ , **lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = load_image(lowerCamelCase_ )
if prompt is not None:
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError(
f'''Received an invalid text input, got - {type(lowerCamelCase_ )} - but expected a single string. '''
"""Note also that one single text can be provided for conditional image to text generation.""" )
SCREAMING_SNAKE_CASE : Optional[int] = self.model.config.model_type
if model_type == "git":
SCREAMING_SNAKE_CASE : Dict = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework )
SCREAMING_SNAKE_CASE : str = self.tokenizer(text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ).input_ids
SCREAMING_SNAKE_CASE : Optional[int] = [self.tokenizer.cls_token_id] + input_ids
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
SCREAMING_SNAKE_CASE : int = self.image_processor(images=lowerCamelCase_ , header_text=lowerCamelCase_ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework )
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework )
model_inputs.update(lowerCamelCase_ )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
SCREAMING_SNAKE_CASE : Any = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
SCREAMING_SNAKE_CASE : Optional[Any] = None
return model_inputs
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any]=None ):
'''simple docstring'''
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , lowerCamelCase_ )
and all(x is None for x in model_inputs["""input_ids"""] )
):
SCREAMING_SNAKE_CASE : List[str] = None
if generate_kwargs is None:
SCREAMING_SNAKE_CASE : int = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
SCREAMING_SNAKE_CASE : Tuple = model_inputs.pop(self.model.main_input_name )
SCREAMING_SNAKE_CASE : Any = self.model.generate(lowerCamelCase_ , **lowerCamelCase_ , **lowerCamelCase_ )
return model_outputs
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for output_ids in model_outputs:
SCREAMING_SNAKE_CASE : List[Any] = {
"""generated_text""": self.tokenizer.decode(
lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , )
}
records.append(lowerCamelCase_ )
return records
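
# A hedged usage sketch for the pipeline above. The checkpoint and image URL
# are examples only; substitute any image-captioning model and image you have.
from transformers import pipeline

captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))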
| 323 | 1 |
'''simple docstring'''
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = PriorTransformer
SCREAMING_SNAKE_CASE__ = '''hidden_states'''
@property
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = 4
SCREAMING_SNAKE_CASE : Dict = 8
SCREAMING_SNAKE_CASE : Union[str, Any] = 7
SCREAMING_SNAKE_CASE : Dict = floats_tensor((batch_size, embedding_dim) ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = floats_tensor((batch_size, embedding_dim) ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(lowerCamelCase_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def lowerCamelCase_ ( self : int , lowerCamelCase_ : List[str]=0 ):
'''simple docstring'''
torch.manual_seed(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = 4
SCREAMING_SNAKE_CASE : int = 8
SCREAMING_SNAKE_CASE : Optional[Any] = 7
SCREAMING_SNAKE_CASE : Optional[int] = torch.randn((batch_size, embedding_dim) ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = torch.randn((batch_size, embedding_dim) ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCamelCase_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return (4, 8)
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return (4, 8)
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 4,
"""num_layers""": 2,
"""embedding_dim""": 8,
"""num_embeddings""": 7,
"""additional_embeddings""": 4,
}
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = PriorTransformer.from_pretrained(
"""hf-internal-testing/prior-dummy""" , output_loading_info=lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self.prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE : str = self.model_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Any = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : List[Any] = ["""hidden_states""", """timestep"""]
self.assertListEqual(arg_names[:2] , lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = PriorTransformer.from_pretrained("""hf-internal-testing/prior-dummy""" )
SCREAMING_SNAKE_CASE : Optional[Any] = model.to(lowerCamelCase_ )
if hasattr(lowerCamelCase_ , """set_default_attn_processor""" ):
model.set_default_attn_processor()
SCREAMING_SNAKE_CASE : Dict = self.get_dummy_seed_input()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(**lowerCamelCase_ )[0]
SCREAMING_SNAKE_CASE : List[Any] = output[0, :5].flatten().cpu()
print(lowerCamelCase_ )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] )
self.assertTrue(torch_all_close(lowerCamelCase_ , lowerCamelCase_ , rtol=1e-2 ) )
@slow
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Dict=1 , lowerCamelCase_ : Tuple=7_68 , lowerCamelCase_ : Tuple=77 , lowerCamelCase_ : Dict=0 ):
'''simple docstring'''
torch.manual_seed(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = batch_size
SCREAMING_SNAKE_CASE : int = embedding_dim
SCREAMING_SNAKE_CASE : List[str] = num_embeddings
SCREAMING_SNAKE_CASE : int = torch.randn((batch_size, embedding_dim) ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = torch.randn((batch_size, embedding_dim) ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCamelCase_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]],
[37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]],
# fmt: on
] )
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = PriorTransformer.from_pretrained("""kandinsky-community/kandinsky-2-1-prior""" , subfolder="""prior""" )
model.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_seed_input(seed=lowerCamelCase_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(**lowerCamelCase_ )[0]
assert list(sample.shape ) == [1, 7_68]
SCREAMING_SNAKE_CASE : Optional[int] = sample[0, :8].flatten().cpu()
print(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 )
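
# Minimal forward-pass sketch mirroring the toy config from the tests above.
# All sizes are the tests' dummy values, and `predicted_image_embedding` as
# the output field is an assumption about the returned dataclass.
import torch
from diffusers import PriorTransformer

model = PriorTransformer(
    num_attention_heads=2,
    attention_head_dim=4,
    num_layers=2,
    embedding_dim=8,
    num_embeddings=7,
    additional_embeddings=4,
)
out = model(
    hidden_states=torch.randn(4, 8),
    timestep=2,
    proj_embedding=torch.randn(4, 8),
    encoder_hidden_states=torch.randn(4, 7, 8),
)
print(out.predicted_image_embedding.shape)  # expected: torch.Size([4, 8])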
| 323 |
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (CMStochasticIterativeScheduler,)
SCREAMING_SNAKE_CASE__ = 10
def lowerCamelCase_ ( self : List[str] , **lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = {
"""num_train_timesteps""": 2_01,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
config.update(**lowerCamelCase_ )
return config
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = 10
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0](**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = scheduler.timesteps[0]
SCREAMING_SNAKE_CASE : Dict = scheduler.timesteps[1]
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample
SCREAMING_SNAKE_CASE : List[str] = 0.1 * sample
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = 1
scheduler.set_timesteps(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = scheduler.timesteps
SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(lowerCamelCase_ ):
# 1. scale model input
SCREAMING_SNAKE_CASE : Optional[int] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict noise residual
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , lowerCamelCase_ )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE : List[str] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = pred_prev_sample
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 192.7_614 ) < 1e-2
assert abs(result_mean.item() - 0.2_510 ) < 1e-3
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = [1_06, 0]
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = scheduler.timesteps
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict noise residual
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , lowerCamelCase_ )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE : str = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Dict = pred_prev_sample
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 347.6_357 ) < 1e-2
assert abs(result_mean.item() - 0.4_527 ) < 1e-3
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [39, 30, 12, 15, 0]
with self.assertRaises(lowerCamelCase_ , msg="""`timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = [39, 30, 12, 1, 0]
SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ )
with self.assertRaises(lowerCamelCase_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=lowerCamelCase_ , timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Any = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
lowerCamelCase_ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
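
# The denoising loop these tests exercise, as a compact standalone sketch with
# a stand-in "model" (a simple scaling) instead of a real UNet.
import torch
from diffusers import CMStochasticIterativeScheduler

scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
scheduler.set_timesteps(10)
generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)  # 1. scale model input
    model_output = 0.1 * model_input                      # 2. stand-in noise prediction
    sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample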
| 323 | 1 |
'''simple docstring'''
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """
    0/1 knapsack by brute-force recursion: at each index, either skip the item
    or take it (if it still fits) and recurse on the remaining capacity.

    >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    """
    if index == number_of_items:
        return 0
    # option 1: skip the current item
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # option 2: take it, if it still fits
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
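
# The plain recursion above is O(2**n). Only (max_weight, index) change across
# calls, so memoizing that pair gives the classic top-down DP. A sketch:
from functools import lru_cache


def knapsack_memo(weights: list, values: list, max_weight: int) -> int:
    @lru_cache(maxsize=None)
    def best(remaining: int, index: int) -> int:
        if index == len(weights):
            return 0
        skip = best(remaining, index + 1)
        take = 0
        if weights[index] <= remaining:
            take = values[index] + best(remaining - weights[index], index + 1)
        return max(skip, take)

    return best(max_weight, 0)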
| 323 |
'''simple docstring'''
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
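
# TextDatasetReader is normally reached through load_dataset; a minimal usage
# sketch (the data file path is a placeholder):
from datasets import load_dataset

ds = load_dataset("text", data_files={"train": "my_corpus.txt"})
print(ds["train"][0]["text"])  # each line of the file becomes one row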
| 323 | 1 |
'''simple docstring'''
def multiplicative_persistence(num: int) -> int:
    """
    Count how many times the digits of num must be multiplied together
    before a single digit remains.

    >>> multiplicative_persistence(217)
    2
    """
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """
    Count how many times the digits of num must be summed together
    before a single digit remains.

    >>> additive_persistence(199)
    3
    """
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
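
# Worked example (a sketch): 39 -> 27 -> 14 -> 4 is three multiplicative
# steps, while 39 -> 12 -> 3 is two additive steps.
if __name__ == "__main__":
    print(multiplicative_persistence(39))  # 3
    print(additive_persistence(39))        # 2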
| 323 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = 3_84
SCREAMING_SNAKE_CASE : Union[str, Any] = 7
if "tiny" in model_name:
SCREAMING_SNAKE_CASE : List[str] = 96
SCREAMING_SNAKE_CASE : List[str] = (2, 2, 6, 2)
SCREAMING_SNAKE_CASE : List[Any] = (3, 6, 12, 24)
elif "small" in model_name:
SCREAMING_SNAKE_CASE : Any = 96
SCREAMING_SNAKE_CASE : List[str] = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : int = (3, 6, 12, 24)
elif "base" in model_name:
SCREAMING_SNAKE_CASE : int = 1_28
SCREAMING_SNAKE_CASE : Any = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : int = (4, 8, 16, 32)
SCREAMING_SNAKE_CASE : Optional[Any] = 12
SCREAMING_SNAKE_CASE : str = 5_12
elif "large" in model_name:
SCREAMING_SNAKE_CASE : Tuple = 1_92
SCREAMING_SNAKE_CASE : Tuple = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : List[str] = (6, 12, 24, 48)
SCREAMING_SNAKE_CASE : Tuple = 12
SCREAMING_SNAKE_CASE : Union[str, Any] = 7_68
# set label information
SCREAMING_SNAKE_CASE : List[str] = 1_50
SCREAMING_SNAKE_CASE : Optional[Any] = """huggingface/label-files"""
SCREAMING_SNAKE_CASE : List[str] = """ade20k-id2label.json"""
SCREAMING_SNAKE_CASE : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) )
SCREAMING_SNAKE_CASE : str = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Optional[Any] = SwinConfig(
embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , num_heads=lowerCamelCase_ , window_size=lowerCamelCase_ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
SCREAMING_SNAKE_CASE : List[str] = UperNetConfig(
backbone_config=lowerCamelCase_ , auxiliary_in_channels=lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , )
return config
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = dct.pop(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = val
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
SCREAMING_SNAKE_CASE : Dict = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE : int = in_proj_weight[:dim, :]
SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[: dim]
SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE : Any = in_proj_bias[
dim : dim * 2
]
SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[
-dim :, :
]
SCREAMING_SNAKE_CASE : str = in_proj_bias[-dim :]
# fmt: on
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = x.shape
SCREAMING_SNAKE_CASE : Any = x.reshape(lowerCamelCase_ , 4 , in_channel // 4 )
SCREAMING_SNAKE_CASE : Any = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = x.shape
SCREAMING_SNAKE_CASE : Dict = x.reshape(lowerCamelCase_ , in_channel // 4 , 4 )
SCREAMING_SNAKE_CASE : str = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = x.shape[0]
SCREAMING_SNAKE_CASE : List[str] = x.reshape(4 , in_channel // 4 )
SCREAMING_SNAKE_CASE : str = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = x.shape[0]
SCREAMING_SNAKE_CASE : Optional[int] = x.reshape(in_channel // 4 , 4 )
SCREAMING_SNAKE_CASE : str = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
SCREAMING_SNAKE_CASE : List[str] = model_name_to_url[model_name]
SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location="""cpu""" , file_name=lowerCamelCase_ )[
"""state_dict"""
]
for name, param in state_dict.items():
print(lowerCamelCase_ , param.shape )
SCREAMING_SNAKE_CASE : Dict = get_upernet_config(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetForSemanticSegmentation(lowerCamelCase_ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(lowerCamelCase_ )
if "bn" in key:
SCREAMING_SNAKE_CASE : List[str] = key.replace("""bn""" , """batch_norm""" )
SCREAMING_SNAKE_CASE : Optional[Any] = val
# rename keys
SCREAMING_SNAKE_CASE : Union[str, Any] = create_rename_keys(lowerCamelCase_ )
for src, dest in rename_keys:
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
read_in_q_k_v(lowerCamelCase_ , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
SCREAMING_SNAKE_CASE : Tuple = reverse_correct_unfold_reduction_order(lowerCamelCase_ )
if "norm" in key:
SCREAMING_SNAKE_CASE : Optional[int] = reverse_correct_unfold_norm_order(lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
# verify on image
SCREAMING_SNAKE_CASE : Optional[int] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert("""RGB""" )
SCREAMING_SNAKE_CASE : Optional[int] = SegformerImageProcessor()
SCREAMING_SNAKE_CASE : str = processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = outputs.logits
print(logits.shape )
print("""First values of logits:""" , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] )
elif model_name == "upernet-swin-small":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] )
elif model_name == "upernet-swin-base":
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] )
elif model_name == "upernet-swin-large":
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase_ , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase_ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowerCamelCase_ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[f'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__UpperCAmelCase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
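    # Illustrative invocation (the script filename is an assumption):
    #   python convert_upernet_checkpoint.py --model_name upernet-swin-tiny \
    #       --pytorch_dump_folder_path ./upernet-swin-tiny --push_to_hub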
| 323 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : int , lowerCamelCase_ : int ):
'''simple docstring'''
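        # every token receives the same score 1 / length, i.e. a uniform distribution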
SCREAMING_SNAKE_CASE : List[Any] = jnp.ones((batch_size, length) ) / length
return scores
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : Optional[int] = 20
SCREAMING_SNAKE_CASE : Tuple = self._get_uniform_logits(batch_size=2 , length=lowerCamelCase_ )
# tweak scores to not be uniform anymore
        SCREAMING_SNAKE_CASE : Any = scores.at[1, 5].set((1 / length) + 0.1 ) # peak in the second batch row (index 1)
        SCREAMING_SNAKE_CASE : Any = scores.at[1, 10].set((1 / length) - 0.4 ) # valley in the second batch row (index 1)
# compute softmax
SCREAMING_SNAKE_CASE : Optional[int] = jax.nn.softmax(lowerCamelCase_ , axis=-1 )
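        # temperature < 1 sharpens the distribution, temperature > 1 flattens it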
SCREAMING_SNAKE_CASE : List[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
SCREAMING_SNAKE_CASE : int = FlaxTemperatureLogitsWarper(temperature=1.3 )
SCREAMING_SNAKE_CASE : Optional[Any] = jax.nn.softmax(temp_dist_warper_sharper(lowerCamelCase_ , scores.copy() , cur_len=lowerCamelCase_ ) , axis=-1 )
SCREAMING_SNAKE_CASE : Optional[int] = jax.nn.softmax(temp_dist_warper_smoother(lowerCamelCase_ , scores.copy() , cur_len=lowerCamelCase_ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : str = 10
SCREAMING_SNAKE_CASE : List[str] = 2
# create ramp distribution
SCREAMING_SNAKE_CASE : List[Any] = np.broadcast_to(np.arange(lowerCamelCase_ )[None, :] , (batch_size, vocab_size) ).copy()
SCREAMING_SNAKE_CASE : Optional[Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
SCREAMING_SNAKE_CASE : List[Any] = FlaxTopKLogitsWarper(3 )
SCREAMING_SNAKE_CASE : List[Any] = top_k_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
SCREAMING_SNAKE_CASE : Any = 5
SCREAMING_SNAKE_CASE : Optional[int] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
SCREAMING_SNAKE_CASE : int = np.broadcast_to(np.arange(lowerCamelCase_ )[None, :] , (batch_size, length) ).copy()
SCREAMING_SNAKE_CASE : int = top_k_warp_safety_check(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : int = 10
SCREAMING_SNAKE_CASE : Optional[int] = 2
        # create a probability distribution and take its log (undoing the softmax applied inside FlaxTopPLogitsWarper)
SCREAMING_SNAKE_CASE : str = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
SCREAMING_SNAKE_CASE : str = FlaxTopPLogitsWarper(0.8 )
SCREAMING_SNAKE_CASE : str = np.exp(top_p_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) )
        # the distribution should be filtered to the smallest set of values whose cumulative sum is >= top_p
# exp (-inf) => 0
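        # row 0 keeps {0.5, 0.3} (cumulative mass 0.8); row 1 keeps {0.3, 0.3, 0.25} (cumulative mass 0.85 >= 0.8)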
SCREAMING_SNAKE_CASE : Any = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
# check edge cases with negative and extreme logits
SCREAMING_SNAKE_CASE : Any = np.broadcast_to(np.arange(lowerCamelCase_ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
SCREAMING_SNAKE_CASE : Tuple = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
SCREAMING_SNAKE_CASE : Optional[Any] = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = top_p_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = 20
SCREAMING_SNAKE_CASE : Any = 4
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : List[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCamelCase_ )
# check that min length is applied at length 5
SCREAMING_SNAKE_CASE : Dict = ids_tensor((batch_size, 20) , vocab_size=20 )
SCREAMING_SNAKE_CASE : Dict = 5
SCREAMING_SNAKE_CASE : List[str] = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = min_dist_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = 15
SCREAMING_SNAKE_CASE : Optional[Any] = min_dist_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
self.assertFalse(jnp.isinf(lowerCamelCase_ ).any() )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 20
SCREAMING_SNAKE_CASE : Tuple = 4
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCamelCase_ )
# check that all scores are -inf except the bos_token_id score
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor((batch_size, 1) , vocab_size=20 )
SCREAMING_SNAKE_CASE : Tuple = 1
SCREAMING_SNAKE_CASE : Any = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = logits_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
SCREAMING_SNAKE_CASE : Optional[Any] = 3
SCREAMING_SNAKE_CASE : int = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = logits_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
self.assertFalse(jnp.isinf(lowerCamelCase_ ).any() )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 20
SCREAMING_SNAKE_CASE : int = 4
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Tuple = 5
SCREAMING_SNAKE_CASE : Optional[int] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
# check that all scores are -inf except the eos_token_id when max_length is reached
SCREAMING_SNAKE_CASE : str = ids_tensor((batch_size, 4) , vocab_size=20 )
SCREAMING_SNAKE_CASE : int = 4
SCREAMING_SNAKE_CASE : Optional[int] = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = logits_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
SCREAMING_SNAKE_CASE : List[Any] = 3
SCREAMING_SNAKE_CASE : Any = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = logits_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
self.assertFalse(jnp.isinf(lowerCamelCase_ ).any() )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = 4
SCREAMING_SNAKE_CASE : Dict = 10
SCREAMING_SNAKE_CASE : Dict = 15
SCREAMING_SNAKE_CASE : Union[str, Any] = 2
SCREAMING_SNAKE_CASE : int = 1
SCREAMING_SNAKE_CASE : List[str] = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE : Any = ids_tensor((batch_size, sequence_length) , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = input_ids.copy()
SCREAMING_SNAKE_CASE : Tuple = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
SCREAMING_SNAKE_CASE : Tuple = FlaxTopKLogitsWarper(3 )
SCREAMING_SNAKE_CASE : str = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
SCREAMING_SNAKE_CASE : int = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = 10
# no processor list
SCREAMING_SNAKE_CASE : Union[str, Any] = temp_dist_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = top_k_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = top_p_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = min_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = bos_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = eos_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
# with processor list
SCREAMING_SNAKE_CASE : Optional[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
SCREAMING_SNAKE_CASE : int = processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
# scores should be equal
self.assertTrue(jnp.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 4
SCREAMING_SNAKE_CASE : Tuple = 10
SCREAMING_SNAKE_CASE : Optional[int] = 15
SCREAMING_SNAKE_CASE : Union[str, Any] = 2
SCREAMING_SNAKE_CASE : Optional[int] = 1
SCREAMING_SNAKE_CASE : Union[str, Any] = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor((batch_size, sequence_length) , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = input_ids.copy()
SCREAMING_SNAKE_CASE : Optional[Any] = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE : int = FlaxTemperatureLogitsWarper(temperature=0.5 )
SCREAMING_SNAKE_CASE : Optional[int] = FlaxTopKLogitsWarper(3 )
SCREAMING_SNAKE_CASE : int = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
SCREAMING_SNAKE_CASE : Tuple = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = 10
# no processor list
def run_no_processor_list(lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] ):
SCREAMING_SNAKE_CASE : str = temp_dist_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = top_k_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = top_p_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = min_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = bos_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = eos_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
return scores
# with processor list
def run_processor_list(lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple ):
SCREAMING_SNAKE_CASE : Any = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
SCREAMING_SNAKE_CASE : List[str] = processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
return scores
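        # jit-compile both code paths to verify the processors are traceable by JAX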
SCREAMING_SNAKE_CASE : Optional[Any] = jax.jit(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = jax.jit(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = jitted_run_no_processor_list(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = jitted_run_processor_list(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# scores should be equal
self.assertTrue(jnp.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 323 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class UpperCamelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Dict[str, int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int = None , lowerCamelCase_ : int = None ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Any = pad_token_id
SCREAMING_SNAKE_CASE : List[Any] = max_length
SCREAMING_SNAKE_CASE : Optional[int] = vocab
SCREAMING_SNAKE_CASE : List[Any] = merges
SCREAMING_SNAKE_CASE : Tuple = BytePairTokenizer(lowerCamelCase_ , lowerCamelCase_ , sequence_length=lowerCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls : Any , lowerCamelCase_ : GPTaTokenizer , *lowerCamelCase_ : str , **lowerCamelCase_ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = [""" """.join(lowerCamelCase_ ) for m in tokenizer.bpe_ranks.keys()]
SCREAMING_SNAKE_CASE : List[str] = tokenizer.get_vocab()
return cls(lowerCamelCase_ , lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls : List[Any] , lowerCamelCase_ : Union[str, os.PathLike] , *lowerCamelCase_ : str , **lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = GPTaTokenizer.from_pretrained(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
return cls.from_tokenizer(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls : List[str] , lowerCamelCase_ : Tuple ):
'''simple docstring'''
return cls(**lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : int = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tf_tokenizer(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = tf.ones_like(lowerCamelCase_ )
if self.pad_token_id is not None:
# pad the tokens up to max length
SCREAMING_SNAKE_CASE : Optional[int] = max_length if max_length is not None else self.max_length
if max_length is not None:
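                # pad_model_inputs pads/truncates to max_seq_length and returns the matching attention mask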
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = pad_model_inputs(
lowerCamelCase_ , max_seq_length=lowerCamelCase_ , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 323 | 1 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Tuple=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = list(poly_a or [0] )[:]
SCREAMING_SNAKE_CASE : Optional[int] = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
SCREAMING_SNAKE_CASE : Tuple = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
SCREAMING_SNAKE_CASE : List[str] = len(self.polyB )
# Add 0 to make lengths equal a power of 2
SCREAMING_SNAKE_CASE : Dict = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
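        # rounding up to a power of two lets the radix-2 divide-and-conquer recursion apply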
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
        # A primitive N-th root of unity used for the Fourier transform
SCREAMING_SNAKE_CASE : Union[str, Any] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
SCREAMING_SNAKE_CASE : Optional[int] = self.__multiply()
def lowerCamelCase_ ( self : int , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
# Corner case
if len(lowerCamelCase_ ) <= 1:
return dft[0]
        # Cooley-Tukey butterflies: repeatedly combine pairs of half-size transforms, halving next_ncol each pass
SCREAMING_SNAKE_CASE : Dict = self.c_max_length // 2
while next_ncol > 0:
SCREAMING_SNAKE_CASE : Optional[Any] = [[] for i in range(lowerCamelCase_ )]
SCREAMING_SNAKE_CASE : Dict = self.root**next_ncol
# First half of next step
SCREAMING_SNAKE_CASE : int = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(lowerCamelCase_ ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
SCREAMING_SNAKE_CASE : str = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(lowerCamelCase_ ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
SCREAMING_SNAKE_CASE : List[str] = new_dft
SCREAMING_SNAKE_CASE : List[str] = next_ncol // 2
return dft[0]
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.__dft("""A""" )
SCREAMING_SNAKE_CASE : Any = self.__dft("""B""" )
SCREAMING_SNAKE_CASE : int = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
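        # (the usual 1/N normalisation is folded in as a division by 2 at each of the log2(N) merge levels)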
SCREAMING_SNAKE_CASE : int = 2
while next_ncol <= self.c_max_length:
SCREAMING_SNAKE_CASE : Union[str, Any] = [[] for i in range(lowerCamelCase_ )]
SCREAMING_SNAKE_CASE : List[Any] = self.root ** (next_ncol // 2)
SCREAMING_SNAKE_CASE : Optional[int] = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
SCREAMING_SNAKE_CASE : Optional[Any] = new_inverse_c
next_ncol *= 2
# Unpack
SCREAMING_SNAKE_CASE : Optional[int] = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = """A = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) )
SCREAMING_SNAKE_CASE : str = """B = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) )
SCREAMING_SNAKE_CASE : List[str] = """A*B = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) )
return f'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
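    # Illustrative check (using the class defined above):
    #   (1 + 2x) * (3 + 4x) = 3 + 10x + 8x^2, so the product coefficients
    #   come out as approximately [3, 10, 8] (complex values with ~0 imaginary part)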
| 323 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCamelCase_ : Optional[NestedDataStructureLike[PathLike]] = None , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Optional[Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = path_or_paths
SCREAMING_SNAKE_CASE : List[Any] = split if split or isinstance(lowerCamelCase_ , lowerCamelCase_ ) else """train"""
SCREAMING_SNAKE_CASE : List[Any] = features
SCREAMING_SNAKE_CASE : Union[str, Any] = cache_dir
SCREAMING_SNAKE_CASE : Dict = keep_in_memory
SCREAMING_SNAKE_CASE : Union[str, Any] = streaming
SCREAMING_SNAKE_CASE : Optional[int] = num_proc
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs
@abstractmethod
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
pass
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : str , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Tuple , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = features
SCREAMING_SNAKE_CASE : int = cache_dir
SCREAMING_SNAKE_CASE : Dict = keep_in_memory
SCREAMING_SNAKE_CASE : Tuple = streaming
SCREAMING_SNAKE_CASE : Union[str, Any] = num_proc
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs
@abstractmethod
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
pass
| 323 | 1 |
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = prime_factors(lowerCamelCase_ )
if is_square_free(lowerCamelCase_ ):
return -1 if len(lowerCamelCase_ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = """ylacombe/bark-small"""
SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : str = """en_speaker_1"""
SCREAMING_SNAKE_CASE : Optional[int] = """This is a test string"""
SCREAMING_SNAKE_CASE : Optional[int] = """speaker_embeddings_path.json"""
SCREAMING_SNAKE_CASE : List[Any] = """speaker_embeddings"""
def lowerCamelCase_ ( self : int , **lowerCamelCase_ : int ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[str] = BarkProcessor(tokenizer=lowerCamelCase_ )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
SCREAMING_SNAKE_CASE : int = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
SCREAMING_SNAKE_CASE : List[str] = 35
SCREAMING_SNAKE_CASE : List[Any] = 2
SCREAMING_SNAKE_CASE : int = 8
SCREAMING_SNAKE_CASE : Optional[int] = {
"""semantic_prompt""": np.ones(lowerCamelCase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
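        # a voice preset bundles a semantic prompt plus coarse and fine codebook prompts of matching length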
# test providing already loaded voice_preset
SCREAMING_SNAKE_CASE : Tuple = processor(text=self.input_string , voice_preset=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = processor(text=self.input_string , voice_preset=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
SCREAMING_SNAKE_CASE : Optional[Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Any = BarkProcessor(tokenizer=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=self.input_string )
SCREAMING_SNAKE_CASE : Tuple = tokenizer(
self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 323 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = FlaxXLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained("""xlm-roberta-base""" )
SCREAMING_SNAKE_CASE : List[Any] = """The dog is cute and lives in the garden house"""
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array([tokenizer.encode(lowerCamelCase_ )] )
SCREAMING_SNAKE_CASE : Any = (1, 12, 7_68) # batch_size, sequence_length, embedding_vector_dim
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.array(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ )["""last_hidden_state"""]
self.assertEqual(output.shape , lowerCamelCase_ )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , lowerCamelCase_ , atol=1e-3 ) )
| 323 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCAmelCase = logging.getLogger(__name__)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return (preds == labels).mean()
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
SCREAMING_SNAKE_CASE__ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def __A ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase_ )
# Set seed
set_seed(training_args.seed )
try:
SCREAMING_SNAKE_CASE : Dict = processors[data_args.task_name]()
SCREAMING_SNAKE_CASE : Optional[int] = processor.get_labels()
SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase_ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , )
# Get datasets
SCREAMING_SNAKE_CASE : Optional[Any] = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
SCREAMING_SNAKE_CASE : Dict = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(lowerCamelCase_ ) -> Dict:
SCREAMING_SNAKE_CASE : str = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(lowerCamelCase_ , p.label_ids )}
# Data collator
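    # padding to a multiple of 8 keeps fp16 tensor-core kernels efficient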
SCREAMING_SNAKE_CASE : List[Any] = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
SCREAMING_SNAKE_CASE : Any = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , compute_metrics=lowerCamelCase_ , data_collator=lowerCamelCase_ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE : Optional[Any] = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
SCREAMING_SNAKE_CASE : Optional[Any] = trainer.evaluate()
SCREAMING_SNAKE_CASE : str = os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_master():
with open(lowerCamelCase_ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , lowerCamelCase_ , lowerCamelCase_ )
writer.write("""%s = %s\n""" % (key, value) )
results.update(lowerCamelCase_ )
return results
def __A ( lowerCamelCase_ ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 323 | 1 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = XGLMTokenizer
SCREAMING_SNAKE_CASE__ = XGLMTokenizerFast
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE : Dict = XGLMTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = """<pad>"""
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(lowerCamelCase_ ) , 10_08 )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_08 )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = XGLMTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCamelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
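        # fairseq_offset shifts the raw sentencepiece ids to leave room for the fairseq special tokens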
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCamelCase_ , f.name )
SCREAMING_SNAKE_CASE : Any = XGLMTokenizer(f.name , keep_accents=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = pickle.dumps(lowerCamelCase_ )
pickle.loads(lowerCamelCase_ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE : List[Any] = """I was born in 92000, and this is falsé."""
SCREAMING_SNAKE_CASE : int = tokenizer.tokenize(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE : int = tokenizer.encode(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = """Hello World!"""
SCREAMING_SNAKE_CASE : Union[str, Any] = [2, 3_12_27, 44_47, 35]
self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )
@slow
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
SCREAMING_SNAKE_CASE : List[str] = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35]
# fmt: on
self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )
@slow
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
        # fmt: off
        SCREAMING_SNAKE_CASE : str = {
"""input_ids""": [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name="""facebook/xglm-564M""" , padding=lowerCamelCase_ , )
| 323 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Any=("DownEncoderBlock2D",) , lowerCamelCase_ : List[Any]=(64,) , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : List[Any]="silu" , lowerCamelCase_ : Optional[int]=True , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[int] = layers_per_block
SCREAMING_SNAKE_CASE : int = torch.nn.Convad(
lowerCamelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList([] )
# down
SCREAMING_SNAKE_CASE : Tuple = block_out_channels[0]
for i, down_block_type in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Any = output_channel
SCREAMING_SNAKE_CASE : List[str] = block_out_channels[i]
SCREAMING_SNAKE_CASE : Union[str, Any] = i == len(lowerCamelCase_ ) - 1
SCREAMING_SNAKE_CASE : Optional[Any] = get_down_block(
lowerCamelCase_ , num_layers=self.layers_per_block , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
self.down_blocks.append(lowerCamelCase_ )
# mid
SCREAMING_SNAKE_CASE : Union[str, Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
# out
SCREAMING_SNAKE_CASE : List[Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCamelCase_ , eps=1e-6 )
SCREAMING_SNAKE_CASE : List[Any] = nn.SiLU()
SCREAMING_SNAKE_CASE : Dict = 2 * out_channels if double_z else out_channels
SCREAMING_SNAKE_CASE : List[Any] = nn.Convad(block_out_channels[-1] , lowerCamelCase_ , 3 , padding=1 )
SCREAMING_SNAKE_CASE : Tuple = False
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = x
SCREAMING_SNAKE_CASE : int = self.conv_in(lowerCamelCase_ )
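        # gradient checkpointing trades compute for memory: activations are recomputed in the backward pass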
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase_ : List[Any] ):
def custom_forward(*lowerCamelCase_ : List[str] ):
return module(*lowerCamelCase_ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
else:
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCamelCase_ )
else:
# down
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : Tuple = down_block(lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : List[Any] = self.mid_block(lowerCamelCase_ )
# post-process
SCREAMING_SNAKE_CASE : Optional[Any] = self.conv_norm_out(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = self.conv_act(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self.conv_out(lowerCamelCase_ )
return sample
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : str=("UpDecoderBlock2D",) , lowerCamelCase_ : Union[str, Any]=(64,) , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : Dict="silu" , lowerCamelCase_ : Any="group" , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : int = layers_per_block
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Convad(
lowerCamelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Any = nn.ModuleList([] )
SCREAMING_SNAKE_CASE : str = in_channels if norm_type == """spatial""" else None
# mid
SCREAMING_SNAKE_CASE : Dict = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
# up
SCREAMING_SNAKE_CASE : Union[str, Any] = list(reversed(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Any = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = output_channel
SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i]
SCREAMING_SNAKE_CASE : List[str] = i == len(lowerCamelCase_ ) - 1
SCREAMING_SNAKE_CASE : List[Any] = get_up_block(
lowerCamelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , prev_output_channel=lowerCamelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , resnet_time_scale_shift=lowerCamelCase_ , )
self.up_blocks.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = output_channel
# out
if norm_type == "spatial":
SCREAMING_SNAKE_CASE : List[Any] = SpatialNorm(block_out_channels[0] , lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCamelCase_ , eps=1e-6 )
SCREAMING_SNAKE_CASE : Dict = nn.SiLU()
SCREAMING_SNAKE_CASE : str = nn.Convad(block_out_channels[0] , lowerCamelCase_ , 3 , padding=1 )
SCREAMING_SNAKE_CASE : Dict = False
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : str=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = z
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_in(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase_ : List[str] ):
def custom_forward(*lowerCamelCase_ : str ):
return module(*lowerCamelCase_ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
SCREAMING_SNAKE_CASE : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
else:
# middle
SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ )
else:
# middle
SCREAMING_SNAKE_CASE : Any = self.mid_block(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Any = up_block(lowerCamelCase_ , lowerCamelCase_ )
# post-process
if latent_embeds is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_norm_out(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_act(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.conv_out(lowerCamelCase_ )
return sample
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int=None , lowerCamelCase_ : Any="random" , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : List[Any]=True ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Tuple = n_e
SCREAMING_SNAKE_CASE : int = vq_embed_dim
SCREAMING_SNAKE_CASE : Tuple = beta
SCREAMING_SNAKE_CASE : Union[str, Any] = legacy
SCREAMING_SNAKE_CASE : int = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
SCREAMING_SNAKE_CASE : Optional[Any] = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
SCREAMING_SNAKE_CASE : Tuple = self.used.shape[0]
SCREAMING_SNAKE_CASE : Any = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
SCREAMING_SNAKE_CASE : Union[str, Any] = self.re_embed
SCREAMING_SNAKE_CASE : Any = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
SCREAMING_SNAKE_CASE : Optional[int] = n_e
SCREAMING_SNAKE_CASE : Any = sane_index_shape
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = inds.shape
assert len(lowerCamelCase_ ) > 1
SCREAMING_SNAKE_CASE : Tuple = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long()
SCREAMING_SNAKE_CASE : Union[str, Any] = match.argmax(-1 )
SCREAMING_SNAKE_CASE : Tuple = match.sum(2 ) < 1
if self.unknown_index == "random":
SCREAMING_SNAKE_CASE : Tuple = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
SCREAMING_SNAKE_CASE : Any = self.unknown_index
return new.reshape(lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = inds.shape
assert len(lowerCamelCase_ ) > 1
SCREAMING_SNAKE_CASE : str = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ )
if self.re_embed > self.used.shape[0]: # extra token
SCREAMING_SNAKE_CASE : List[Any] = 0 # simply set to zero
SCREAMING_SNAKE_CASE : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCamelCase_ )
return back.reshape(lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous()
SCREAMING_SNAKE_CASE : int = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
SCREAMING_SNAKE_CASE : Any = torch.argmin(torch.cdist(lowerCamelCase_ , self.embedding.weight ) , dim=1 )
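        # nearest-neighbour lookup: index of the closest codebook vector for each latent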
SCREAMING_SNAKE_CASE : Tuple = self.embedding(lowerCamelCase_ ).view(z.shape )
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : List[str] = None
# compute loss for embedding
if not self.legacy:
SCREAMING_SNAKE_CASE : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
SCREAMING_SNAKE_CASE : Tuple = z + (z_q - z).detach()
# reshape back to match original input shape
SCREAMING_SNAKE_CASE : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE : List[Any] = self.remap_to_used(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ):
'''simple docstring'''
if self.remap is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE : List[Any] = self.unmap_to_all(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
SCREAMING_SNAKE_CASE : str = self.embedding(lowerCamelCase_ )
if shape is not None:
SCREAMING_SNAKE_CASE : List[str] = z_q.view(lowerCamelCase_ )
# reshape back to match original input shape
SCREAMING_SNAKE_CASE : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
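# The quantizer above snaps each latent vector to its nearest codebook entry
# (argmin over pairwise L2 distances) and keeps gradients flowing with the
# straight-through estimator z_q = z + (z_q - z).detach(). A minimal sketch of
# that nearest-neighbour lookup, with toy sizes chosen purely for illustration:
import torch

toy_codebook = torch.randn(8, 4)                     # n_e = 8 entries, vq_embed_dim = 4
toy_z = torch.randn(2, 4, 3, 3)                      # (batch, channels, height, width)
toy_flat = toy_z.permute(0, 2, 3, 1).reshape(-1, 4)  # one row per spatial position
toy_idx = torch.cdist(toy_flat, toy_codebook).argmin(dim=1)
toy_z_q = toy_codebook[toy_idx].view(2, 3, 3, 4).permute(0, 3, 1, 2)
assert toy_z_q.shape == toy_z.shape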
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int]=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = parameters
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = torch.chunk(lowerCamelCase_ , 2 , dim=1 )
SCREAMING_SNAKE_CASE : List[str] = torch.clamp(self.logvar , -30.0 , 20.0 )
SCREAMING_SNAKE_CASE : Dict = deterministic
SCREAMING_SNAKE_CASE : int = torch.exp(0.5 * self.logvar )
SCREAMING_SNAKE_CASE : Tuple = torch.exp(self.logvar )
if self.deterministic:
SCREAMING_SNAKE_CASE : List[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[torch.Generator] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = randn_tensor(
self.mean.shape , generator=lowerCamelCase_ , device=self.parameters.device , dtype=self.parameters.dtype )
SCREAMING_SNAKE_CASE : Optional[Any] = self.mean + self.std * sample
return x
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int=None ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=[1, 2, 3] ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
SCREAMING_SNAKE_CASE : List[Any] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.mean
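# The kl() method above relies on the closed form for diagonal Gaussians,
# KL(N(mu, sigma^2) || N(0, 1)) = 0.5 * sum(mu^2 + sigma^2 - 1 - log sigma^2),
# summed over the channel and spatial axes. A quick sanity check of the
# identity on a toy tensor (shapes here are illustrative only):
import torch

toy_mean = torch.zeros(1, 4, 8, 8)
toy_logvar = torch.zeros(1, 4, 8, 8)
toy_kl = 0.5 * torch.sum(toy_mean.pow(2) + toy_logvar.exp() - 1.0 - toy_logvar, dim=[1, 2, 3])
assert torch.allclose(toy_kl, torch.zeros(1))  # N(0, 1) against itself has zero KL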
| 323 | 1 |
'''simple docstring'''
from math import ceil
def __A ( lowerCamelCase_ = 10_01 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
SCREAMING_SNAKE_CASE : Dict = 2 * i + 1
SCREAMING_SNAKE_CASE : Union[str, Any] = 2 * i
SCREAMING_SNAKE_CASE : str = total + 4 * odd**2 - 6 * even
return total
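# Why "4 * odd**2 - 6 * even" sums a ring's diagonal corners: the ring with odd
# side length s = 2*i + 1 has its largest corner at s**2, and the other three
# corners trail it by (s - 1), 2*(s - 1) and 3*(s - 1), so the four corners sum
# to 4*s**2 - 6*(s - 1) = 4*odd**2 - 6*even with even = 2*i. Worked check for
# the 5x5 spiral, whose diagonal sum is known to be 101:
assert 1 + (4 * 3**2 - 6 * 2) + (4 * 5**2 - 6 * 4) == 101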
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
__UpperCAmelCase = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 323 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaxAutoencoderKL
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 4
SCREAMING_SNAKE_CASE : str = 3
SCREAMING_SNAKE_CASE : List[Any] = (32, 32)
SCREAMING_SNAKE_CASE : Tuple = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE : Any = jax.random.uniform(lowerCamelCase_ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_input
return init_dict, inputs_dict
| 323 | 1 |
'''simple docstring'''
__UpperCAmelCase = [
"""Audio""",
"""Array2D""",
"""Array3D""",
"""Array4D""",
"""Array5D""",
"""ClassLabel""",
"""Features""",
"""Sequence""",
"""Value""",
"""Image""",
"""Translation""",
"""TranslationVariableLanguages""",
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
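# Downstream usage sketch of the feature types re-exported above (the schema is
# illustrative; Features, Value and ClassLabel are the public names):
# features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})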
| 323 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase__ :
"""simple docstring"""
@staticmethod
def lowerCamelCase_ ( *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : List[str] ):
'''simple docstring'''
pass
def __A ( lowerCamelCase_ ):
"""simple docstring"""
    SCREAMING_SNAKE_CASE : Dict = hashlib.md5(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = DepthEstimationPipeline(model=lowerCamelCase_ , image_processor=lowerCamelCase_ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} , lowerCamelCase_ )
import datasets
SCREAMING_SNAKE_CASE : List[str] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
SCREAMING_SNAKE_CASE : Any = depth_estimator(
[
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
] )
self.assertEqual(
[
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
] , lowerCamelCase_ , )
@require_tf
@unittest.skip("""Depth estimation is not implemented in TF""" )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
@slow
@require_torch
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = """Intel/dpt-large"""
SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline("""depth-estimation""" , model=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
SCREAMING_SNAKE_CASE : str = hashimage(outputs["""depth"""] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.662 )
@require_torch
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
        self.skipTest("""There is no hf-internal-testing tiny model for either GLPN or DPT""" )
| 323 | 1 |
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any]=13 , lowerCamelCase_ : List[str]=7 , lowerCamelCase_ : Any=True , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Dict=True , lowerCamelCase_ : Any=True , lowerCamelCase_ : List[Any]=99 , lowerCamelCase_ : Dict=16 , lowerCamelCase_ : Dict=36 , lowerCamelCase_ : List[str]=6 , lowerCamelCase_ : List[str]=6 , lowerCamelCase_ : str=6 , lowerCamelCase_ : Union[str, Any]=37 , lowerCamelCase_ : Optional[Any]="gelu" , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : Dict=0.1 , lowerCamelCase_ : List[Any]=5_12 , lowerCamelCase_ : List[Any]=16 , lowerCamelCase_ : Any=2 , lowerCamelCase_ : Dict=0.02 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : int=4 , lowerCamelCase_ : List[Any]=None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : str = batch_size
SCREAMING_SNAKE_CASE : List[str] = seq_length
SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
SCREAMING_SNAKE_CASE : List[Any] = use_input_mask
SCREAMING_SNAKE_CASE : Any = use_token_type_ids
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE : str = embedding_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : Any = num_hidden_layers
SCREAMING_SNAKE_CASE : Any = num_hidden_groups
SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Union[str, Any] = type_vocab_size
SCREAMING_SNAKE_CASE : int = type_sequence_label_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Tuple = num_labels
SCREAMING_SNAKE_CASE : Any = num_choices
SCREAMING_SNAKE_CASE : Dict = scope
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Tuple = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : int = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = AlbertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : str , lowerCamelCase_ : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = AlbertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , sentence_order_label=lowerCamelCase_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : str , lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict , lowerCamelCase_ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = AlbertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = AlbertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Any , lowerCamelCase_ : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.num_labels
SCREAMING_SNAKE_CASE : Optional[Any] = AlbertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : str = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.num_labels
SCREAMING_SNAKE_CASE : List[Any] = AlbertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.num_choices
SCREAMING_SNAKE_CASE : List[Any] = AlbertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : List[Any] = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = config_and_inputs
SCREAMING_SNAKE_CASE : Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ = (
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ = True
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = super()._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Optional[int] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
return inputs_dict
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = AlbertModelTester(self )
SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase_ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase_ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE : Dict = type
self.model_tester.create_and_check_model(*lowerCamelCase_ )
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : List[Any] = AlbertModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = AlbertModel.from_pretrained("""albert-base-v2""" )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
SCREAMING_SNAKE_CASE : int = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCamelCase_ , atol=1e-4 ) )
| 323 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=13 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Dict=2_24 , lowerCamelCase_ : List[Any]=30 , lowerCamelCase_ : Union[str, Any]=4_00 , lowerCamelCase_ : str=True , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 18}
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : Any = num_channels
SCREAMING_SNAKE_CASE : str = image_size
SCREAMING_SNAKE_CASE : Dict = min_resolution
SCREAMING_SNAKE_CASE : List[str] = max_resolution
SCREAMING_SNAKE_CASE : str = do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size
SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize
SCREAMING_SNAKE_CASE : List[Any] = image_mean
SCREAMING_SNAKE_CASE : str = image_std
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ViTImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = EfficientFormerImageProcessorTester(self )
@property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , Image.Image )
        # Test non-batched input
SCREAMING_SNAKE_CASE : List[str] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , np.ndarray )
        # Test non-batched input
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Tuple = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , torch.Tensor )
        # Test non-batched input
SCREAMING_SNAKE_CASE : Any = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
| 323 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''canine'''
def __init__( self : Any , lowerCamelCase_ : Optional[int]=7_68 , lowerCamelCase_ : List[Any]=12 , lowerCamelCase_ : List[str]=12 , lowerCamelCase_ : List[str]=30_72 , lowerCamelCase_ : List[Any]="gelu" , lowerCamelCase_ : Any=0.1 , lowerCamelCase_ : Any=0.1 , lowerCamelCase_ : List[str]=1_63_84 , lowerCamelCase_ : List[str]=16 , lowerCamelCase_ : int=0.02 , lowerCamelCase_ : Optional[Any]=1e-12 , lowerCamelCase_ : Dict=0 , lowerCamelCase_ : List[Any]=0Xe_000 , lowerCamelCase_ : str=0Xe_001 , lowerCamelCase_ : Any=4 , lowerCamelCase_ : Optional[Any]=4 , lowerCamelCase_ : Union[str, Any]=8 , lowerCamelCase_ : Any=1_63_84 , lowerCamelCase_ : Dict=1_28 , **lowerCamelCase_ : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : str = type_vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
# Character config:
SCREAMING_SNAKE_CASE : Optional[Any] = downsampling_rate
SCREAMING_SNAKE_CASE : List[str] = upsampling_kernel_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_hash_functions
SCREAMING_SNAKE_CASE : Tuple = num_hash_buckets
SCREAMING_SNAKE_CASE : Tuple = local_transformer_stride
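# Instantiation sketch (transformers publishes this class as CanineConfig; the
# character-level arguments are the ones it adds over BERT-style configs, and
# the values below simply restate the defaults):
# config = CanineConfig(downsampling_rate=4, num_hash_functions=8, num_hash_buckets=16384)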
| 323 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
__UpperCAmelCase = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
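# _LazyModule defers the real imports until an attribute is first touched, so
# `import transformers.onnx` stays cheap; the TYPE_CHECKING branch hands static
# analyzers the concrete symbols. Access pattern sketch:
# from transformers import onnx
# onnx.OnnxConfig  # first access triggers the deferred import of .config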
| 323 | 1 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
__UpperCAmelCase = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
__UpperCAmelCase = """</w>"""
__UpperCAmelCase = """@@ """
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = set()
SCREAMING_SNAKE_CASE : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE : Dict = char
return pairs
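# get_pairs collects the adjacent symbol bigrams that BPE ranks against its
# merge table, e.g. the symbol tuple ("l", "o", "w") yields {("l", "o"), ("o", "w")}.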
# Speech2Text2 has no max input length
__UpperCAmelCase = {"""facebook/s2t-wav2vec2-large-en-de""": 1024}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask''']
def __init__( self : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int]="<s>" , lowerCamelCase_ : Union[str, Any]="<pad>" , lowerCamelCase_ : List[str]="</s>" , lowerCamelCase_ : Optional[int]="<unk>" , lowerCamelCase_ : Union[str, Any]=False , lowerCamelCase_ : int=None , **lowerCamelCase_ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
unk_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , do_lower_case=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = do_lower_case
with open(lowerCamelCase_ , encoding="""utf-8""" ) as vocab_handle:
SCREAMING_SNAKE_CASE : Dict = json.load(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''' )
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Dict = None
else:
with open(lowerCamelCase_ , encoding="""utf-8""" ) as merges_handle:
SCREAMING_SNAKE_CASE : int = merges_handle.read().split("""\n""" )[:-1]
SCREAMING_SNAKE_CASE : List[Any] = [tuple(merge.split()[:2] ) for merge in merges]
SCREAMING_SNAKE_CASE : List[Any] = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
SCREAMING_SNAKE_CASE : Tuple = {}
@property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return len(self.decoder )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE : List[str] = get_pairs(lowerCamelCase_ )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE : Union[str, Any] = min(lowerCamelCase_ , key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = bigram
SCREAMING_SNAKE_CASE : Optional[Any] = []
SCREAMING_SNAKE_CASE : Dict = 0
while i < len(lowerCamelCase_ ):
try:
SCREAMING_SNAKE_CASE : Tuple = word.index(lowerCamelCase_ , lowerCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE : str = j
if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE : Dict = tuple(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = new_word
if len(lowerCamelCase_ ) == 1:
break
else:
SCREAMING_SNAKE_CASE : Optional[Any] = get_pairs(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = """ """.join(lowerCamelCase_ )
if word == "\n " + BPE_TOKEN_MERGES:
SCREAMING_SNAKE_CASE : Union[str, Any] = """\n""" + BPE_TOKEN_MERGES
if word.endswith(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = word.replace(lowerCamelCase_ , """""" )
SCREAMING_SNAKE_CASE : List[str] = word.replace(""" """ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = word
return word
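    # The loop above is standard byte-pair encoding: repeatedly fuse the adjacent
    # pair with the lowest merge rank until none remains. Toy trace with the
    # illustrative ranks {("l", "o"): 0, ("lo", "w"): 1} (not from any shipped
    # merges.txt): ("l", "o", "w") -> ("lo", "w") -> ("low",)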
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Tuple ):
'''simple docstring'''
if self.bpe_ranks is None:
raise ValueError(
"""This tokenizer was instantiated without a `merges.txt` file, so"""
""" that it can only be used for decoding, not for encoding."""
"""Make sure to provide `merges.txt` file at instantiation to enable """
"""encoding.""" )
if self.do_lower_case:
SCREAMING_SNAKE_CASE : str = text.lower()
SCREAMING_SNAKE_CASE : int = text.split()
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(lowerCamelCase_ ).split(""" """ ) ) )
return split_tokens
def lowerCamelCase_ ( self : str , lowerCamelCase_ : str ):
'''simple docstring'''
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token ) )
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.decoder.get(lowerCamelCase_ , self.unk_token )
return result
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = """ """.join(lowerCamelCase_ )
# make sure @@ tokens are concatenated
SCREAMING_SNAKE_CASE : List[str] = """""".join(string.split(lowerCamelCase_ ) )
return string
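    # The join/split above removes the "@@ " continuation markers that BPE
    # inserted, e.g. ["hel@@", "lo", "there"] -> "hel@@ lo there" -> "hello there".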
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(
lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + """\n""" )
SCREAMING_SNAKE_CASE : Tuple = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase_ : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = token_index
writer.write(""" """.join(lowerCamelCase_ ) + """\n""" )
index += 1
return (vocab_file, merges_file)
| 323 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 323 | 1 |
'''simple docstring'''
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return number | (1 << position)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return number & ~(1 << position)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return number ^ (1 << position)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return ((number >> position) & 1) == 1
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return int((number & (1 << position)) != 0 )
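# Worked examples of the bitwise identities above, using 0b1010 (decimal 10).
# Note that all five helpers share the name __A, so each definition shadows the
# previous one; the expressions are therefore spelled out inline here:
assert 0b1010 | (1 << 0) == 0b1011          # set bit 0
assert 0b1010 & ~(1 << 1) == 0b1000         # clear bit 1
assert 0b1010 ^ (1 << 3) == 0b0010          # flip bit 3
assert ((0b1010 >> 3) & 1) == 1             # bit 3 is set
assert int((0b1010 & (1 << 2)) != 0) == 0   # bit 2 is not set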
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = 3_84
SCREAMING_SNAKE_CASE : Union[str, Any] = 7
if "tiny" in model_name:
SCREAMING_SNAKE_CASE : List[str] = 96
SCREAMING_SNAKE_CASE : List[str] = (2, 2, 6, 2)
SCREAMING_SNAKE_CASE : List[Any] = (3, 6, 12, 24)
elif "small" in model_name:
SCREAMING_SNAKE_CASE : Any = 96
SCREAMING_SNAKE_CASE : List[str] = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : int = (3, 6, 12, 24)
elif "base" in model_name:
SCREAMING_SNAKE_CASE : int = 1_28
SCREAMING_SNAKE_CASE : Any = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : int = (4, 8, 16, 32)
SCREAMING_SNAKE_CASE : Optional[Any] = 12
SCREAMING_SNAKE_CASE : str = 5_12
elif "large" in model_name:
SCREAMING_SNAKE_CASE : Tuple = 1_92
SCREAMING_SNAKE_CASE : Tuple = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : List[str] = (6, 12, 24, 48)
SCREAMING_SNAKE_CASE : Tuple = 12
SCREAMING_SNAKE_CASE : Union[str, Any] = 7_68
# set label information
SCREAMING_SNAKE_CASE : List[str] = 1_50
SCREAMING_SNAKE_CASE : Optional[Any] = """huggingface/label-files"""
SCREAMING_SNAKE_CASE : List[str] = """ade20k-id2label.json"""
SCREAMING_SNAKE_CASE : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) )
SCREAMING_SNAKE_CASE : str = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Optional[Any] = SwinConfig(
embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , num_heads=lowerCamelCase_ , window_size=lowerCamelCase_ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
SCREAMING_SNAKE_CASE : List[str] = UperNetConfig(
backbone_config=lowerCamelCase_ , auxiliary_in_channels=lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , )
return config
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = dct.pop(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = val
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
SCREAMING_SNAKE_CASE : Dict = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE : int = in_proj_weight[:dim, :]
SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[: dim]
SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE : Any = in_proj_bias[
dim : dim * 2
]
SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[
-dim :, :
]
SCREAMING_SNAKE_CASE : str = in_proj_bias[-dim :]
# fmt: on
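# read_in_q_k_v above splits each block's fused qkv projection into separate
# query/key/value tensors: rows [0, dim) are q, rows [dim, 2*dim) are k, and the
# last dim rows are v. Shape check on a toy matrix (dim chosen arbitrarily):
import torch

toy_dim = 4
toy_qkv = torch.randn(3 * toy_dim, toy_dim)
toy_q, toy_k, toy_v = toy_qkv[:toy_dim, :], toy_qkv[toy_dim : 2 * toy_dim, :], toy_qkv[-toy_dim:, :]
assert toy_q.shape == toy_k.shape == toy_v.shape == (toy_dim, toy_dim)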
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = x.shape
SCREAMING_SNAKE_CASE : Any = x.reshape(lowerCamelCase_ , 4 , in_channel // 4 )
SCREAMING_SNAKE_CASE : Any = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = x.shape
SCREAMING_SNAKE_CASE : Dict = x.reshape(lowerCamelCase_ , in_channel // 4 , 4 )
SCREAMING_SNAKE_CASE : str = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = x.shape[0]
SCREAMING_SNAKE_CASE : List[str] = x.reshape(4 , in_channel // 4 )
SCREAMING_SNAKE_CASE : str = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = x.shape[0]
SCREAMING_SNAKE_CASE : Optional[int] = x.reshape(in_channel // 4 , 4 )
SCREAMING_SNAKE_CASE : str = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowerCamelCase_ )
return x
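# The four helpers above appear to undo the channel interleaving that Swin's
# patch-merging ("unfold") step bakes into downsample weights: each reshapes
# channels into groups of four, swaps the two middle entries via the
# [0, 2, 1, 3] index, and transposes the grouping back out.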
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
SCREAMING_SNAKE_CASE : List[str] = model_name_to_url[model_name]
SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location="""cpu""" , file_name=lowerCamelCase_ )[
"""state_dict"""
]
for name, param in state_dict.items():
print(lowerCamelCase_ , param.shape )
SCREAMING_SNAKE_CASE : Dict = get_upernet_config(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetForSemanticSegmentation(lowerCamelCase_ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(lowerCamelCase_ )
if "bn" in key:
SCREAMING_SNAKE_CASE : List[str] = key.replace("""bn""" , """batch_norm""" )
SCREAMING_SNAKE_CASE : Optional[Any] = val
# rename keys
SCREAMING_SNAKE_CASE : Union[str, Any] = create_rename_keys(lowerCamelCase_ )
for src, dest in rename_keys:
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
read_in_q_k_v(lowerCamelCase_ , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
SCREAMING_SNAKE_CASE : Tuple = reverse_correct_unfold_reduction_order(lowerCamelCase_ )
if "norm" in key:
SCREAMING_SNAKE_CASE : Optional[int] = reverse_correct_unfold_norm_order(lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
# verify on image
SCREAMING_SNAKE_CASE : Optional[int] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert("""RGB""" )
SCREAMING_SNAKE_CASE : Optional[int] = SegformerImageProcessor()
SCREAMING_SNAKE_CASE : str = processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = outputs.logits
print(logits.shape )
print("""First values of logits:""" , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] )
elif model_name == "upernet-swin-small":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] )
elif model_name == "upernet-swin-base":
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] )
elif model_name == "upernet-swin-large":
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase_ , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase_ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowerCamelCase_ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[f'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__UpperCAmelCase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
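# Illustrative usage sketch (not part of the original script). Assuming the
# entry point keeps its original name `convert_upernet_checkpoint` and the file
# is saved as `convert_upernet.py`, a conversion run would look like:
#
#   python convert_upernet.py \
#       --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny \
#       --push_to_hub
#
# or, from Python:
#
#   convert_upernet_checkpoint(
#       model_name="upernet-swin-tiny",
#       pytorch_dump_folder_path="./upernet-swin-tiny",
#       push_to_hub=False,
#   )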
| 323 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str=14 , lowerCamelCase_ : Optional[Any]=7 , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=True , lowerCamelCase_ : str=False , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : int=99 , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : int=4 , lowerCamelCase_ : List[Any]=4 , lowerCamelCase_ : List[str]=4 , lowerCamelCase_ : Union[str, Any]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Union[str, Any]=0.1 , lowerCamelCase_ : List[str]=5_12 , lowerCamelCase_ : Union[str, Any]=0.02 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = parent
SCREAMING_SNAKE_CASE : Optional[int] = batch_size
SCREAMING_SNAKE_CASE : Any = seq_length
SCREAMING_SNAKE_CASE : List[str] = is_training
SCREAMING_SNAKE_CASE : Optional[int] = use_input_mask
SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : List[Any] = rotary_dim
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Dict = vocab_size - 1
SCREAMING_SNAKE_CASE : str = vocab_size - 1
SCREAMING_SNAKE_CASE : List[Any] = vocab_size - 1
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : List[str] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = config_and_inputs
SCREAMING_SNAKE_CASE : Tuple = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = 20
SCREAMING_SNAKE_CASE : Any = model_class_name(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model.init_cache(input_ids.shape[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
SCREAMING_SNAKE_CASE : Optional[int] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE : Any = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
SCREAMING_SNAKE_CASE : str = model(
input_ids[:, -1:] , attention_mask=lowerCamelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = 20
SCREAMING_SNAKE_CASE : Dict = model_class_name(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
SCREAMING_SNAKE_CASE : str = model.init_cache(input_ids.shape[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE : Any = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
SCREAMING_SNAKE_CASE : Dict = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class UpperCamelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
SCREAMING_SNAKE_CASE__ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxGPTJModelTester(self )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
@tooslow
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=lowerCamelCase_ , truncation=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : Optional[Any] = model.config.eos_token_id
SCREAMING_SNAKE_CASE : str = jax.jit(model.generate )
SCREAMING_SNAKE_CASE : str = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences
SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@is_pt_flax_cross_test
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE : str = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape
SCREAMING_SNAKE_CASE : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Optional[int] = 1
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
SCREAMING_SNAKE_CASE : Optional[int] = pt_model_class(lowerCamelCase_ ).eval()
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = fx_state
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = pt_model(**lowerCamelCase_ ).to_tuple()
SCREAMING_SNAKE_CASE : Any = fx_model(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = model_class.from_pretrained(lowerCamelCase_ , from_pt=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = fx_model_loaded(**lowerCamelCase_ ).to_tuple()
self.assertEqual(
len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = pt_model_class(lowerCamelCase_ ).eval()
SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : List[Any] = load_flax_weights_in_pytorch_model(lowerCamelCase_ , fx_model.params )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
SCREAMING_SNAKE_CASE : Dict = 1
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : Tuple = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = pt_model(**lowerCamelCase_ ).to_tuple()
SCREAMING_SNAKE_CASE : Optional[Any] = fx_model(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = pt_model_class.from_pretrained(lowerCamelCase_ , from_flax=lowerCamelCase_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = pt_model_loaded(**lowerCamelCase_ ).to_tuple()
self.assertEqual(
len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" )
SCREAMING_SNAKE_CASE : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase_ )
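# Illustrative sketch (not part of the original file): assuming this test
# module lives at its usual path inside the transformers repo, the suite above
# would be run with pytest, e.g.:
#
#   python -m pytest tests/models/gptj/test_modeling_flax_gptj.py
#
# The two cache checks assert that decoding token-by-token with
# `past_key_values` matches a single full forward pass to within 1e-3.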
| 323 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
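# Illustrative sketch (not part of the original file): `_LazyModule` defers the
# heavy submodule imports until an attribute is first accessed, e.g.
#
#   from transformers.models.mbart import MBartConfig   # config import only
#   from transformers.models.mbart import MBartModel    # imports torch-backed code
#
# When a backend is missing, the corresponding names are simply never added to
# `_import_structure` above (the `except OptionalDependencyNotAvailable: pass`
# branches), so touching them fails at attribute-lookup time rather than at
# package import time.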
| 323 |
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase__ ( lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [R'''h\.\d+\.attn\.bias''', R'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : int = 5_02_57 , lowerCamelCase_ : int = 10_24 , lowerCamelCase_ : int = 7_68 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : str = "gelu_new" , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 1e-5 , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[int] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
                f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'''
f''' `n_embd`: {n_embd} are not equal.''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = prefix_inner_dim
SCREAMING_SNAKE_CASE : List[str] = prefix_hidden_dim
SCREAMING_SNAKE_CASE : Tuple = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
SCREAMING_SNAKE_CASE : str = (
nn.Linear(self.prefix_hidden_dim , lowerCamelCase_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
SCREAMING_SNAKE_CASE : Any = GPTaConfig(
vocab_size=lowerCamelCase_ , n_positions=lowerCamelCase_ , n_embd=lowerCamelCase_ , n_layer=lowerCamelCase_ , n_head=lowerCamelCase_ , n_inner=lowerCamelCase_ , activation_function=lowerCamelCase_ , resid_pdrop=lowerCamelCase_ , embd_pdrop=lowerCamelCase_ , attn_pdrop=lowerCamelCase_ , layer_norm_epsilon=lowerCamelCase_ , initializer_range=lowerCamelCase_ , scale_attn_weights=lowerCamelCase_ , use_cache=lowerCamelCase_ , scale_attn_by_inverse_layer_idx=lowerCamelCase_ , reorder_and_upcast_attn=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = GPTaLMHeadModel(lowerCamelCase_ )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : Optional[torch.Tensor] = None , lowerCamelCase_ : Optional[torch.Tensor] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.transformer.transformer.wte(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.encode_prefix(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self.decode_prefix(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
SCREAMING_SNAKE_CASE : Dict = torch.cat((dummy_token, input_ids) , dim=1 )
SCREAMING_SNAKE_CASE : str = self.transformer(inputs_embeds=lowerCamelCase_ , labels=lowerCamelCase_ , attention_mask=lowerCamelCase_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : torch.device ):
'''simple docstring'''
return torch.zeros(lowerCamelCase_ , self.prefix_length , dtype=torch.intaa , device=lowerCamelCase_ )
def lowerCamelCase_ ( self : str , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
return self.encode_prefix(lowerCamelCase_ )
@torch.no_grad()
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = torch.split(lowerCamelCase_ , 1 , dim=0 )
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : Tuple = []
for feature in features:
SCREAMING_SNAKE_CASE : Optional[int] = self.decode_prefix(feature.to(lowerCamelCase_ ) ) # back to the clip feature
# Only support beam search for now
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.generate_beam(
input_embeds=lowerCamelCase_ , device=lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = torch.stack(lowerCamelCase_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : int=None , lowerCamelCase_ : int = 5 , lowerCamelCase_ : int = 67 , lowerCamelCase_ : float = 1.0 , lowerCamelCase_ : Optional[int] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = eos_token_id
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.int )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.bool )
if input_embeds is not None:
SCREAMING_SNAKE_CASE : Dict = input_embeds
else:
SCREAMING_SNAKE_CASE : Dict = self.transformer.transformer.wte(lowerCamelCase_ )
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Optional[int] = self.transformer(inputs_embeds=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = outputs.logits
SCREAMING_SNAKE_CASE : Optional[int] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
SCREAMING_SNAKE_CASE : Any = logits.softmax(-1 ).log()
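            # First iteration: no beams exist yet, so seed them with the top-k
            # tokens of the first next-token distribution. On later iterations,
            # extend every live beam, re-rank all candidates by their
            # length-normalized cumulative log-probability, and keep the top-k.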
if scores is None:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = logits.topk(lowerCamelCase_ , -1 )
SCREAMING_SNAKE_CASE : Optional[Any] = generated.expand(lowerCamelCase_ , *generated.shape[1:] )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
SCREAMING_SNAKE_CASE : List[Any] = next_tokens
else:
SCREAMING_SNAKE_CASE : Dict = tokens.expand(lowerCamelCase_ , *tokens.shape[1:] )
SCREAMING_SNAKE_CASE : str = torch.cat((tokens, next_tokens) , dim=1 )
else:
SCREAMING_SNAKE_CASE : Tuple = -float(np.inf )
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Dict = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
SCREAMING_SNAKE_CASE : List[str] = scores_sum / seq_lengths[:, None]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = scores_sum_average.view(-1 ).topk(lowerCamelCase_ , -1 )
SCREAMING_SNAKE_CASE : str = next_tokens // scores_sum.shape[1]
SCREAMING_SNAKE_CASE : Tuple = seq_lengths[next_tokens_source]
SCREAMING_SNAKE_CASE : int = next_tokens % scores_sum.shape[1]
SCREAMING_SNAKE_CASE : Dict = next_tokens.unsqueeze(1 )
SCREAMING_SNAKE_CASE : Dict = tokens[next_tokens_source]
SCREAMING_SNAKE_CASE : Any = torch.cat((tokens, next_tokens) , dim=1 )
SCREAMING_SNAKE_CASE : List[str] = generated[next_tokens_source]
SCREAMING_SNAKE_CASE : Optional[Any] = scores_sum_average * seq_lengths
SCREAMING_SNAKE_CASE : Any = is_stopped[next_tokens_source]
SCREAMING_SNAKE_CASE : Dict = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
SCREAMING_SNAKE_CASE : str = torch.cat((generated, next_token_embed) , dim=1 )
SCREAMING_SNAKE_CASE : Dict = is_stopped + next_tokens.eq(lowerCamelCase_ ).squeeze()
if is_stopped.all():
break
SCREAMING_SNAKE_CASE : int = scores / seq_lengths
SCREAMING_SNAKE_CASE : Dict = scores.argsort(descending=lowerCamelCase_ )
# tokens tensors are already padded to max_seq_length
SCREAMING_SNAKE_CASE : Union[str, Any] = [tokens[i] for i in order]
SCREAMING_SNAKE_CASE : Dict = torch.stack(lowerCamelCase_ , dim=0 )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
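# Illustrative sketch (not part of the original file) of the prefix-decoding
# flow implemented above, with hypothetical names for clarity:
#
#   prefix = decode_prefix(encode_prefix(clip_feature))   # (B, L_p, n_embd)
#   tokens = transformer.transformer.wte(input_ids)       # (B, L_t, n_embd)
#   hidden = torch.cat((prefix, tokens), dim=1)           # prefix conditions GPT-2
#   out    = transformer(inputs_embeds=hidden)            # standard LM forward
#
# `generate_beam` then decodes one caption per feature with beam size 5 by
# default, stopping each beam at `eos_token_id`.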
| 323 | 1 |
'''simple docstring'''
__UpperCAmelCase = """Tobias Carryer"""
from time import time
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : List[Any]=int(time() ) ): # noqa: B008
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = multiplier
SCREAMING_SNAKE_CASE : List[Any] = increment
SCREAMING_SNAKE_CASE : List[str] = modulo
SCREAMING_SNAKE_CASE : str = seed
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
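# Illustrative, self-contained sketch (not part of the original file) of the
# same recurrence, seed_{n+1} = (multiplier * seed_n + increment) % modulo,
# using the classic 32-bit constants from the demo below.
def _lcg_demo(seed: int = 42, steps: int = 3) -> list:
    multiplier, increment, modulo = 1664525, 1013904223, 2 << 31
    values = []
    for _ in range(steps):
        # One step of the linear congruential recurrence.
        seed = (multiplier * seed + increment) % modulo
        values.append(seed)
    return values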
if __name__ == "__main__":
# Show the LCG in action.
__UpperCAmelCase = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
while True:
print(lcg.next_number())
| 323 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''git_vision_model'''
def __init__( self : int , lowerCamelCase_ : Dict=7_68 , lowerCamelCase_ : Tuple=30_72 , lowerCamelCase_ : List[Any]=12 , lowerCamelCase_ : Tuple=12 , lowerCamelCase_ : int=3 , lowerCamelCase_ : List[str]=2_24 , lowerCamelCase_ : Optional[Any]=16 , lowerCamelCase_ : Optional[Any]="quick_gelu" , lowerCamelCase_ : List[Any]=1e-5 , lowerCamelCase_ : List[Any]=0.0 , lowerCamelCase_ : Optional[Any]=0.02 , **lowerCamelCase_ : str , ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : int = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE : Optional[int] = image_size
SCREAMING_SNAKE_CASE : List[str] = initializer_range
SCREAMING_SNAKE_CASE : str = attention_dropout
SCREAMING_SNAKE_CASE : Any = layer_norm_eps
SCREAMING_SNAKE_CASE : List[str] = hidden_act
@classmethod
def lowerCamelCase_ ( cls : List[str] , lowerCamelCase_ : Union[str, os.PathLike] , **lowerCamelCase_ : int ):
'''simple docstring'''
cls._set_token_in_kwargs(lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = cls.get_config_dict(lowerCamelCase_ , **lowerCamelCase_ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
SCREAMING_SNAKE_CASE : Optional[Any] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowerCamelCase_ , **lowerCamelCase_ )
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''git'''
def __init__( self : List[str] , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Tuple=3_05_22 , lowerCamelCase_ : Optional[Any]=7_68 , lowerCamelCase_ : Any=6 , lowerCamelCase_ : List[str]=12 , lowerCamelCase_ : List[str]=30_72 , lowerCamelCase_ : Union[str, Any]="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Dict=0.1 , lowerCamelCase_ : Optional[int]=10_24 , lowerCamelCase_ : int=0.02 , lowerCamelCase_ : Optional[int]=1e-12 , lowerCamelCase_ : Union[str, Any]=0 , lowerCamelCase_ : Optional[Any]="absolute" , lowerCamelCase_ : Dict=True , lowerCamelCase_ : Optional[Any]=False , lowerCamelCase_ : Optional[int]=1_01 , lowerCamelCase_ : Optional[Any]=1_02 , lowerCamelCase_ : List[str]=None , **lowerCamelCase_ : str , ):
'''simple docstring'''
super().__init__(bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , pad_token_id=lowerCamelCase_ , **lowerCamelCase_ )
if vision_config is None:
SCREAMING_SNAKE_CASE : Any = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = GitVisionConfig(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Tuple = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : Dict = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE : Union[str, Any] = position_embedding_type
SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache
SCREAMING_SNAKE_CASE : List[str] = tie_word_embeddings
SCREAMING_SNAKE_CASE : int = num_image_with_embedding
SCREAMING_SNAKE_CASE : Optional[Any] = bos_token_id
SCREAMING_SNAKE_CASE : str = eos_token_id
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : Tuple = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE : Any = self.__class__.model_type
return output
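# Illustrative usage sketch (not part of the original file), assuming the two
# classes keep their original names `GitVisionConfig` and `GitConfig`:
#
#   vision = {"hidden_size": 768, "patch_size": 16, "image_size": 224}
#   config = GitConfig(vision_config=vision, vocab_size=30522)
#   config.to_dict()["vision_config"]["patch_size"]   # -> 16
#
# Passing `vision_config=None` falls back to a default `GitVisionConfig`, as
# handled in `__init__` above.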
| 323 | 1 |
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
__UpperCAmelCase = logging.getLogger(__name__)
def __A ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser(
description="""Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).""" )
parser.add_argument("""--file_path""" , type=lowerCamelCase_ , default="""data/dump.txt""" , help="""The path to the data.""" )
parser.add_argument("""--tokenizer_type""" , type=lowerCamelCase_ , default="""bert""" , choices=["""bert""", """roberta""", """gpt2"""] )
parser.add_argument("""--tokenizer_name""" , type=lowerCamelCase_ , default="""bert-base-uncased""" , help="""The tokenizer to use.""" )
parser.add_argument("""--dump_file""" , type=lowerCamelCase_ , default="""data/dump""" , help="""The dump file prefix.""" )
SCREAMING_SNAKE_CASE : Any = parser.parse_args()
logger.info(f'''Loading Tokenizer ({args.tokenizer_name})''' )
if args.tokenizer_type == "bert":
SCREAMING_SNAKE_CASE : List[Any] = BertTokenizer.from_pretrained(args.tokenizer_name )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.special_tokens_map["""cls_token"""] # `[CLS]`
SCREAMING_SNAKE_CASE : Tuple = tokenizer.special_tokens_map["""sep_token"""] # `[SEP]`
elif args.tokenizer_type == "roberta":
SCREAMING_SNAKE_CASE : Union[str, Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name )
SCREAMING_SNAKE_CASE : Dict = tokenizer.special_tokens_map["""cls_token"""] # `<s>`
SCREAMING_SNAKE_CASE : int = tokenizer.special_tokens_map["""sep_token"""] # `</s>`
elif args.tokenizer_type == "gpt2":
SCREAMING_SNAKE_CASE : List[Any] = GPTaTokenizer.from_pretrained(args.tokenizer_name )
SCREAMING_SNAKE_CASE : int = tokenizer.special_tokens_map["""bos_token"""] # `<|endoftext|>`
SCREAMING_SNAKE_CASE : Tuple = tokenizer.special_tokens_map["""eos_token"""] # `<|endoftext|>`
logger.info(f'''Loading text from {args.file_path}''' )
with open(args.file_path , """r""" , encoding="""utf8""" ) as fp:
SCREAMING_SNAKE_CASE : Dict = fp.readlines()
logger.info("""Start encoding""" )
logger.info(f'''{len(lowerCamelCase_ )} examples to process.''' )
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : Optional[int] = 1_00_00
SCREAMING_SNAKE_CASE : Dict = time.time()
for text in data:
SCREAMING_SNAKE_CASE : List[Any] = f'''{bos} {text.strip()} {sep}'''
SCREAMING_SNAKE_CASE : int = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
rslt.append(lowerCamelCase_ )
iter += 1
if iter % interval == 0:
SCREAMING_SNAKE_CASE : Dict = time.time()
logger.info(f'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = time.time()
logger.info("""Finished binarization""" )
logger.info(f'''{len(lowerCamelCase_ )} examples processed.''' )
SCREAMING_SNAKE_CASE : Any = f'''{args.dump_file}.{args.tokenizer_name}.pickle'''
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.vocab_size
if vocab_size < (1 << 16):
SCREAMING_SNAKE_CASE : Optional[int] = [np.uintaa(lowerCamelCase_ ) for d in rslt]
else:
SCREAMING_SNAKE_CASE : Optional[Any] = [np.intaa(lowerCamelCase_ ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f'''Dump to {dp_file}''' )
with open(lowerCamelCase_ , """wb""" ) as handle:
pickle.dump(rslt_ , lowerCamelCase_ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
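# Illustrative usage sketch (not part of the original script); the paths are
# placeholders:
#
#   python binarized_data.py \
#       --file_path data/dump.txt \
#       --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased \
#       --dump_file data/binarized_text
#
# Output: `data/binarized_text.bert-base-uncased.pickle`, a pickled list of
# uint16/int32 token-id arrays (dtype chosen from the tokenizer vocab size).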
| 323 |
'''simple docstring'''
from manim import *
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE : List[str] = Rectangle(height=0.25 , width=0.25 )
SCREAMING_SNAKE_CASE : Optional[int] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : List[Any] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : str = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Tuple = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : List[Any] = Text("""CPU""" , font_size=24 )
SCREAMING_SNAKE_CASE : Any = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Optional[Any] = Text("""GPU""" , font_size=24 )
SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : List[Any] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Text("""Model""" , font_size=24 )
SCREAMING_SNAKE_CASE : List[str] = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for i, rect in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = fill.copy().set_fill(lowerCamelCase_ , opacity=0.8 )
target.move_to(lowerCamelCase_ )
model_arr.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(lowerCamelCase_ )
self.add(*lowerCamelCase_ , *lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Tuple = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Tuple = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Optional[int] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Dict = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : List[Any] = Text("""Disk""" , font_size=24 )
SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
disk.move_to([-4, -1.25, 0] )
self.add(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE : Optional[Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(lowerCamelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = MarkupText(
f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = Square(0.3 )
input.set_fill(lowerCamelCase_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , lowerCamelCase_ , buff=0.5 )
self.play(Write(lowerCamelCase_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=lowerCamelCase_ , buff=0.02 )
self.play(MoveToTarget(lowerCamelCase_ ) )
self.play(FadeOut(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : int = Arrow(start=lowerCamelCase_ , end=lowerCamelCase_ , color=lowerCamelCase_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , lowerCamelCase_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
SCREAMING_SNAKE_CASE : Optional[int] = MarkupText(
f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ , run_time=3 ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.02}
self.play(
Write(lowerCamelCase_ ) , Circumscribe(model_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_cpu_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
SCREAMING_SNAKE_CASE : Optional[int] = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , lowerCamelCase_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
SCREAMING_SNAKE_CASE : Any = AnimationGroup(
FadeOut(lowerCamelCase_ , run_time=0.5 ) , MoveToTarget(lowerCamelCase_ , run_time=0.5 ) , FadeIn(lowerCamelCase_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(lowerCamelCase_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
SCREAMING_SNAKE_CASE : Optional[Any] = 0.7
self.play(
Circumscribe(model_arr[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_arr[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = a_c
SCREAMING_SNAKE_CASE : Optional[Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(lowerCamelCase_ ) , FadeOut(lowerCamelCase_ , run_time=0.5 ) , )
SCREAMING_SNAKE_CASE : int = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ , run_time=3 ) , MoveToTarget(lowerCamelCase_ ) )
self.wait()
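# Illustrative sketch (not part of the original file): assuming this scene is
# saved as `big_model_inference.py` and given a usable class name, it would be
# rendered with:
#
#   manim -pql big_model_inference.py <SceneClassName>
#
# (-p previews the result, -ql renders at low quality for fast iteration).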
| 323 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__UpperCAmelCase = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"""UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""UniSpeechForCTC""",
"""UniSpeechForPreTraining""",
"""UniSpeechForSequenceClassification""",
"""UniSpeechModel""",
"""UniSpeechPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
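# Illustrative sketch (not part of the original file): the `TYPE_CHECKING`
# branch above gives static analyzers the real symbols at zero runtime cost,
# while ordinary imports resolve through the `_LazyModule` assigned here, e.g.
#
#   from transformers import UniSpeechForCTC   # resolved lazily on first access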
| 323 |
'''simple docstring'''
from __future__ import annotations
__UpperCAmelCase = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase_ : dict[str, list[str]] , lowerCamelCase_ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = graph
# mapping node to its parent in resulting breadth first tree
SCREAMING_SNAKE_CASE : dict[str, str | None] = {}
SCREAMING_SNAKE_CASE : List[str] = source_vertex
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = {self.source_vertex}
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Optional[Any] = [self.source_vertex] # first in first out queue
while queue:
SCREAMING_SNAKE_CASE : str = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = vertex
queue.append(lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str ):
'''simple docstring'''
if target_vertex == self.source_vertex:
return self.source_vertex
SCREAMING_SNAKE_CASE : Optional[Any] = self.parent.get(lowerCamelCase_ )
if target_vertex_parent is None:
SCREAMING_SNAKE_CASE : Tuple = (
f'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
)
raise ValueError(lowerCamelCase_ )
return self.shortest_path(lowerCamelCase_ ) + f'''->{target_vertex}'''
if __name__ == "__main__":
__UpperCAmelCase = Graph(graph, """G""")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
| 323 | 1 |
'''simple docstring'''
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : NestedDataStructureLike[PathLike] , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
lowerCamelCase_ , split=lowerCamelCase_ , features=lowerCamelCase_ , cache_dir=lowerCamelCase_ , keep_in_memory=lowerCamelCase_ , streaming=lowerCamelCase_ , num_proc=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : int = path_or_paths if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else {self.split: path_or_paths}
SCREAMING_SNAKE_CASE : Optional[int] = Text(
cache_dir=lowerCamelCase_ , data_files=lowerCamelCase_ , features=lowerCamelCase_ , **lowerCamelCase_ , )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
if self.streaming:
SCREAMING_SNAKE_CASE : int = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : List[str] = None
self.builder.download_and_prepare(
download_config=lowerCamelCase_ , download_mode=lowerCamelCase_ , verification_mode=lowerCamelCase_ , base_path=lowerCamelCase_ , num_proc=self.num_proc , )
SCREAMING_SNAKE_CASE : int = self.builder.as_dataset(
split=self.split , verification_mode=lowerCamelCase_ , in_memory=self.keep_in_memory )
return dataset
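# Illustrative usage sketch (not part of the original file), assuming the class
# keeps its original name `TextDatasetReader`; this is the reader behind
# `load_dataset("text", data_files=...)`:
#
#   ds = TextDatasetReader("corpus.txt", split="train").read()
#   ds[0]["text"]   # one line of the file per example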
| 323 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__UpperCAmelCase = 0
__UpperCAmelCase = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__UpperCAmelCase = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__UpperCAmelCase = tuple[int, int]
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Node | None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = pos_x
SCREAMING_SNAKE_CASE : Any = pos_y
SCREAMING_SNAKE_CASE : Optional[int] = (pos_y, pos_x)
SCREAMING_SNAKE_CASE : Tuple = goal_x
SCREAMING_SNAKE_CASE : List[str] = goal_y
SCREAMING_SNAKE_CASE : Optional[Any] = g_cost
SCREAMING_SNAKE_CASE : Tuple = parent
SCREAMING_SNAKE_CASE : int = self.calculate_heuristic()
SCREAMING_SNAKE_CASE : Tuple = self.g_cost + self.h_cost
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.pos_x - self.goal_x
SCREAMING_SNAKE_CASE : List[str] = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowerCamelCase_ ) + abs(lowerCamelCase_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self : Optional[Any] , lowerCamelCase_ : Node ):
'''simple docstring'''
return self.f_cost < other.f_cost
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : int , lowerCamelCase_ : TPosition , lowerCamelCase_ : TPosition ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = [self.start]
SCREAMING_SNAKE_CASE : list[Node] = []
SCREAMING_SNAKE_CASE : str = False
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
SCREAMING_SNAKE_CASE : Optional[Any] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowerCamelCase_ )
self.closed_nodes.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = self.get_successors(lowerCamelCase_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowerCamelCase_ )
else:
# retrieve the best current path
SCREAMING_SNAKE_CASE : int = self.open_nodes.pop(self.open_nodes.index(lowerCamelCase_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowerCamelCase_ )
else:
self.open_nodes.append(lowerCamelCase_ )
return [self.start.pos]
def lowerCamelCase_ ( self : int , lowerCamelCase_ : Node ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = []
for action in delta:
SCREAMING_SNAKE_CASE : Dict = parent.pos_x + action[1]
SCREAMING_SNAKE_CASE : List[str] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCamelCase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowerCamelCase_ , lowerCamelCase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowerCamelCase_ , ) )
return successors
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Node | None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = node
SCREAMING_SNAKE_CASE : List[str] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
SCREAMING_SNAKE_CASE : Optional[Any] = current_node.parent
path.reverse()
return path
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : int , lowerCamelCase_ : TPosition , lowerCamelCase_ : TPosition ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = AStar(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = AStar(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = False
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
SCREAMING_SNAKE_CASE : List[str] = self.fwd_astar.open_nodes.pop(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowerCamelCase_ , lowerCamelCase_ )
self.fwd_astar.closed_nodes.append(lowerCamelCase_ )
self.bwd_astar.closed_nodes.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = current_bwd_node
SCREAMING_SNAKE_CASE : Any = current_fwd_node
SCREAMING_SNAKE_CASE : Dict = {
self.fwd_astar: self.fwd_astar.get_successors(lowerCamelCase_ ),
self.bwd_astar: self.bwd_astar.get_successors(lowerCamelCase_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowerCamelCase_ )
else:
# retrieve the best current path
SCREAMING_SNAKE_CASE : int = astar.open_nodes.pop(
astar.open_nodes.index(lowerCamelCase_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowerCamelCase_ )
else:
astar.open_nodes.append(lowerCamelCase_ )
return [self.fwd_astar.start.pos]
def lowerCamelCase_ ( self : str , lowerCamelCase_ : Node , lowerCamelCase_ : Node ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.fwd_astar.retrace_path(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.bwd_astar.retrace_path(lowerCamelCase_ )
bwd_path.pop()
bwd_path.reverse()
SCREAMING_SNAKE_CASE : str = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__UpperCAmelCase = (0, 0)
__UpperCAmelCase = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__UpperCAmelCase = time.time()
__UpperCAmelCase = AStar(init, goal)
__UpperCAmelCase = a_star.search()
__UpperCAmelCase = time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
__UpperCAmelCase = time.time()
    __UpperCAmelCase = BidirectionalAStar(init, goal)
    __UpperCAmelCase = bd_a_star.search()  # run the timed search; this call was missing, so the timing measured nothing
    __UpperCAmelCase = time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
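# Quick sanity check of the two heuristics above (illustrative, not part of the
# original script), for a displacement of (dx, dy) = (3, 4):
#   HEURISTIC == 1 (Manhattan): abs(3) + abs(4) = 7
#   HEURISTIC == 0 (Euclidean): sqrt(3**2 + 4**2) = 5.0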
| 323 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class UpperCamelCase__ ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = StableDiffusionLatentUpscalePipeline
SCREAMING_SNAKE_CASE__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''height''',
'''width''',
'''cross_attention_kwargs''',
'''negative_prompt_embeds''',
'''prompt_embeds''',
}
SCREAMING_SNAKE_CASE__ = PipelineTesterMixin.required_optional_params - {'''num_images_per_prompt'''}
SCREAMING_SNAKE_CASE__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
SCREAMING_SNAKE_CASE__ = frozenset([] )
SCREAMING_SNAKE_CASE__ = True
@property
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 1
SCREAMING_SNAKE_CASE : int = 4
SCREAMING_SNAKE_CASE : Any = (16, 16)
SCREAMING_SNAKE_CASE : Any = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase_ )
return image
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = UNetaDConditionModel(
act_fn="""gelu""" , attention_head_dim=8 , norm_num_groups=lowerCamelCase_ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
"""KDownBlock2D""",
"""KCrossAttnDownBlock2D""",
"""KCrossAttnDownBlock2D""",
"""KCrossAttnDownBlock2D""",
) , in_channels=8 , mid_block_type=lowerCamelCase_ , only_cross_attention=lowerCamelCase_ , out_channels=5 , resnet_time_scale_shift="""scale_shift""" , time_embedding_type="""fourier""" , timestep_post_act="""gelu""" , up_block_types=("""KCrossAttnUpBlock2D""", """KCrossAttnUpBlock2D""", """KCrossAttnUpBlock2D""", """KUpBlock2D""") , )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
"""DownEncoderBlock2D""",
"""DownEncoderBlock2D""",
"""DownEncoderBlock2D""",
"""DownEncoderBlock2D""",
] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
SCREAMING_SNAKE_CASE : List[str] = EulerDiscreteScheduler(prediction_type="""sample""" )
SCREAMING_SNAKE_CASE : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""quick_gelu""" , projection_dim=5_12 , )
SCREAMING_SNAKE_CASE : Optional[Any] = CLIPTextModel(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
SCREAMING_SNAKE_CASE : Tuple = {
"""unet""": model.eval(),
"""vae""": vae.eval(),
"""scheduler""": scheduler,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : Union[str, Any]=0 ):
'''simple docstring'''
if str(lowerCamelCase_ ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : List[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": self.dummy_image.cpu(),
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = """cpu"""
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Any = self.pipeline_class(**lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = self.get_dummy_inputs(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = pipe(**lowerCamelCase_ ).images
SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_56, 2_56, 3) )
SCREAMING_SNAKE_CASE : Optional[int] = np.array(
[0.47_222_412, 0.41_921_633, 0.44_717_434, 0.46_874_192, 0.42_588_258, 0.46_150_726, 0.4_677_534, 0.45_583_832, 0.48_579_055] )
SCREAMING_SNAKE_CASE : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCamelCase_ , 1e-3 )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=3e-3 )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [
"""DDIMScheduler""",
"""DDPMScheduler""",
"""PNDMScheduler""",
"""HeunDiscreteScheduler""",
"""EulerAncestralDiscreteScheduler""",
"""KDPM2DiscreteScheduler""",
"""KDPM2AncestralDiscreteScheduler""",
"""DPMSolverSDEScheduler""",
]
SCREAMING_SNAKE_CASE : int = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[Any] = self.pipeline_class(**lowerCamelCase_ )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = self.get_dummy_inputs(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = 2
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # schedulers without a sigma schedule are not supported by this pipeline
continue
SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , scheduler_enum.name )
SCREAMING_SNAKE_CASE : List[str] = scheduler_cls.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE : int = pipe(**lowerCamelCase_ )[0]
outputs.append(lowerCamelCase_ )
assert check_same_shape(lowerCamelCase_ )
@require_torch_gpu
@slow
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE : List[str] = StableDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" , torch_dtype=torch.floataa )
pipe.to("""cuda""" )
SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"""stabilityai/sd-x2-latent-upscaler""" , torch_dtype=torch.floataa )
upscaler.to("""cuda""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = """a photo of an astronaut high resolution, unreal engine, ultra realistic"""
SCREAMING_SNAKE_CASE : int = pipe(lowerCamelCase_ , generator=lowerCamelCase_ , output_type="""latent""" ).images
SCREAMING_SNAKE_CASE : Tuple = upscaler(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=lowerCamelCase_ , output_type="""np""" , ).images[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy""" )
assert np.abs((expected_image - image).mean() ) < 5e-2
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE : Tuple = StableDiffusionLatentUpscalePipeline.from_pretrained(
"""stabilityai/sd-x2-latent-upscaler""" , torch_dtype=torch.floataa )
upscaler.to("""cuda""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = """the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"""
SCREAMING_SNAKE_CASE : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png""" )
SCREAMING_SNAKE_CASE : Optional[int] = upscaler(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=lowerCamelCase_ , output_type="""np""" , ).images[0]
SCREAMING_SNAKE_CASE : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy""" )
assert np.abs((expected_image - image).max() ) < 5e-2
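# Minimal usage sketch of the two-stage flow the slow tests above exercise.
# Model ids come from those tests; a CUDA device is assumed.
import torch
from diffusers import StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16).to("cuda")
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
    "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
).to("cuda")
prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
# generate in latent space, then let the upscaler decode at 2x resolution
low_res_latents = pipe(prompt, output_type="latent").images
image = upscaler(prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0).images[0]
image.save("astronaut_1024.png")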
| 323 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''efficientnet'''
def __init__( self : Tuple , lowerCamelCase_ : int = 3 , lowerCamelCase_ : int = 6_00 , lowerCamelCase_ : float = 2.0 , lowerCamelCase_ : float = 3.1 , lowerCamelCase_ : int = 8 , lowerCamelCase_ : List[int] = [3, 3, 5, 3, 5, 5, 3] , lowerCamelCase_ : List[int] = [32, 16, 24, 40, 80, 1_12, 1_92] , lowerCamelCase_ : List[int] = [16, 24, 40, 80, 1_12, 1_92, 3_20] , lowerCamelCase_ : List[int] = [] , lowerCamelCase_ : List[int] = [1, 2, 2, 2, 1, 2, 1] , lowerCamelCase_ : List[int] = [1, 2, 2, 3, 3, 4, 1] , lowerCamelCase_ : List[int] = [1, 6, 6, 6, 6, 6, 6] , lowerCamelCase_ : float = 0.25 , lowerCamelCase_ : str = "swish" , lowerCamelCase_ : int = 25_60 , lowerCamelCase_ : str = "mean" , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : float = 0.001 , lowerCamelCase_ : float = 0.99 , lowerCamelCase_ : float = 0.5 , lowerCamelCase_ : float = 0.2 , **lowerCamelCase_ : int , ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : int = width_coefficient
SCREAMING_SNAKE_CASE : List[str] = depth_coefficient
SCREAMING_SNAKE_CASE : Optional[Any] = depth_divisor
SCREAMING_SNAKE_CASE : List[str] = kernel_sizes
SCREAMING_SNAKE_CASE : Dict = in_channels
SCREAMING_SNAKE_CASE : List[str] = out_channels
SCREAMING_SNAKE_CASE : Any = depthwise_padding
SCREAMING_SNAKE_CASE : Dict = strides
SCREAMING_SNAKE_CASE : Optional[Any] = num_block_repeats
SCREAMING_SNAKE_CASE : Any = expand_ratios
SCREAMING_SNAKE_CASE : Union[str, Any] = squeeze_expansion_ratio
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dim
SCREAMING_SNAKE_CASE : List[str] = pooling_type
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Any = batch_norm_eps
SCREAMING_SNAKE_CASE : Union[str, Any] = batch_norm_momentum
SCREAMING_SNAKE_CASE : Dict = dropout_rate
SCREAMING_SNAKE_CASE : int = drop_connect_rate
SCREAMING_SNAKE_CASE : Optional[Any] = sum(lowerCamelCase_ ) * 4
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = version.parse('''1.11''' )
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return 1e-5
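# Minimal usage sketch, assuming the de-obfuscated class name `EfficientNetConfig`
# from transformers for the config defined above.
from transformers import EfficientNetConfig

config = EfficientNetConfig(width_coefficient=2.0, depth_coefficient=3.1, image_size=600)
print(config.num_hidden_layers)  # sum(num_block_repeats) * 4 == 64 for the defaults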
| 323 | 1 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = OrderedDict(
[
("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""),
("""beit""", """BeitFeatureExtractor"""),
("""chinese_clip""", """ChineseCLIPFeatureExtractor"""),
("""clap""", """ClapFeatureExtractor"""),
("""clip""", """CLIPFeatureExtractor"""),
("""clipseg""", """ViTFeatureExtractor"""),
("""conditional_detr""", """ConditionalDetrFeatureExtractor"""),
("""convnext""", """ConvNextFeatureExtractor"""),
("""cvt""", """ConvNextFeatureExtractor"""),
("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""),
("""data2vec-vision""", """BeitFeatureExtractor"""),
("""deformable_detr""", """DeformableDetrFeatureExtractor"""),
("""deit""", """DeiTFeatureExtractor"""),
("""detr""", """DetrFeatureExtractor"""),
("""dinat""", """ViTFeatureExtractor"""),
("""donut-swin""", """DonutFeatureExtractor"""),
("""dpt""", """DPTFeatureExtractor"""),
("""encodec""", """EncodecFeatureExtractor"""),
("""flava""", """FlavaFeatureExtractor"""),
("""glpn""", """GLPNFeatureExtractor"""),
("""groupvit""", """CLIPFeatureExtractor"""),
("""hubert""", """Wav2Vec2FeatureExtractor"""),
("""imagegpt""", """ImageGPTFeatureExtractor"""),
("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""),
("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""),
("""levit""", """LevitFeatureExtractor"""),
("""maskformer""", """MaskFormerFeatureExtractor"""),
("""mctct""", """MCTCTFeatureExtractor"""),
("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""),
("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""),
("""mobilevit""", """MobileViTFeatureExtractor"""),
("""nat""", """ViTFeatureExtractor"""),
("""owlvit""", """OwlViTFeatureExtractor"""),
("""perceiver""", """PerceiverFeatureExtractor"""),
("""poolformer""", """PoolFormerFeatureExtractor"""),
("""regnet""", """ConvNextFeatureExtractor"""),
("""resnet""", """ConvNextFeatureExtractor"""),
("""segformer""", """SegformerFeatureExtractor"""),
("""sew""", """Wav2Vec2FeatureExtractor"""),
("""sew-d""", """Wav2Vec2FeatureExtractor"""),
("""speech_to_text""", """Speech2TextFeatureExtractor"""),
("""speecht5""", """SpeechT5FeatureExtractor"""),
("""swiftformer""", """ViTFeatureExtractor"""),
("""swin""", """ViTFeatureExtractor"""),
("""swinv2""", """ViTFeatureExtractor"""),
("""table-transformer""", """DetrFeatureExtractor"""),
("""timesformer""", """VideoMAEFeatureExtractor"""),
("""tvlt""", """TvltFeatureExtractor"""),
("""unispeech""", """Wav2Vec2FeatureExtractor"""),
("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""),
("""van""", """ConvNextFeatureExtractor"""),
("""videomae""", """VideoMAEFeatureExtractor"""),
("""vilt""", """ViltFeatureExtractor"""),
("""vit""", """ViTFeatureExtractor"""),
("""vit_mae""", """ViTFeatureExtractor"""),
("""vit_msn""", """ViTFeatureExtractor"""),
("""wav2vec2""", """Wav2Vec2FeatureExtractor"""),
("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""),
("""wavlm""", """Wav2Vec2FeatureExtractor"""),
("""whisper""", """WhisperFeatureExtractor"""),
("""xclip""", """CLIPFeatureExtractor"""),
("""yolos""", """YolosFeatureExtractor"""),
]
)
__UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def __A ( lowerCamelCase_ ):
"""simple docstring"""
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
SCREAMING_SNAKE_CASE : Optional[Any] = model_type_to_module_name(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = importlib.import_module(f'''.{module_name}''' , """transformers.models""" )
try:
return getattr(lowerCamelCase_ , lowerCamelCase_ )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(lowerCamelCase_ , """__name__""" , lowerCamelCase_ ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
SCREAMING_SNAKE_CASE : Optional[Any] = importlib.import_module("""transformers""" )
if hasattr(lowerCamelCase_ , lowerCamelCase_ ):
return getattr(lowerCamelCase_ , lowerCamelCase_ )
return None
def __A ( lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = False , **lowerCamelCase_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = get_file_from_repo(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
if resolved_config_file is None:
logger.info(
"""Could not locate the feature extractor configuration file, will try to use the model config instead.""" )
return {}
with open(lowerCamelCase_ , encoding="""utf-8""" ) as reader:
return json.load(lowerCamelCase_ )
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Union[str, Any] ):
'''simple docstring'''
raise EnvironmentError(
"""AutoFeatureExtractor is designed to be instantiated """
"""using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" )
@classmethod
@replace_list_option_in_docstrings(lowerCamelCase_ )
def lowerCamelCase_ ( cls : List[str] , lowerCamelCase_ : List[str] , **lowerCamelCase_ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = kwargs.pop("""config""" , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = kwargs.pop("""trust_remote_code""" , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = FeatureExtractionMixin.get_feature_extractor_dict(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = config_dict.get("""feature_extractor_type""" , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = None
if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
SCREAMING_SNAKE_CASE : str = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
# It could be in `config.feature_extractor_type``
SCREAMING_SNAKE_CASE : Any = getattr(lowerCamelCase_ , """feature_extractor_type""" , lowerCamelCase_ )
if hasattr(lowerCamelCase_ , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map:
SCREAMING_SNAKE_CASE : Dict = config.auto_map["""AutoFeatureExtractor"""]
if feature_extractor_class is not None:
SCREAMING_SNAKE_CASE : str = feature_extractor_class_from_name(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = feature_extractor_auto_map is not None
SCREAMING_SNAKE_CASE : str = feature_extractor_class is not None or type(lowerCamelCase_ ) in FEATURE_EXTRACTOR_MAPPING
SCREAMING_SNAKE_CASE : Optional[Any] = resolve_trust_remote_code(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if has_remote_code and trust_remote_code:
SCREAMING_SNAKE_CASE : int = get_class_from_dynamic_module(
lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = kwargs.pop("""code_revision""" , lowerCamelCase_ )
if os.path.isdir(lowerCamelCase_ ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(lowerCamelCase_ , **lowerCamelCase_ )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(lowerCamelCase_ , **lowerCamelCase_ )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(lowerCamelCase_ ) in FEATURE_EXTRACTOR_MAPPING:
SCREAMING_SNAKE_CASE : List[str] = FEATURE_EXTRACTOR_MAPPING[type(lowerCamelCase_ )]
return feature_extractor_class.from_dict(lowerCamelCase_ , **lowerCamelCase_ )
raise ValueError(
f'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
f'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def lowerCamelCase_ ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
FEATURE_EXTRACTOR_MAPPING.register(lowerCamelCase_ , lowerCamelCase_ )
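# Minimal usage sketch for the factory above (de-obfuscated name: AutoFeatureExtractor);
# the checkpoint id is only an example.
from transformers import AutoFeatureExtractor

extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
print(type(extractor).__name__)  # resolved via FEATURE_EXTRACTOR_MAPPING -> Wav2Vec2FeatureExtractor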
| 323 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(lowercase_ )
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Tuple , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : Tuple ):
'''simple docstring'''
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Optional[int]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = {}
SCREAMING_SNAKE_CASE : List[Any] = {}
if prompt is not None:
SCREAMING_SNAKE_CASE : List[Any] = prompt
if generate_kwargs is not None:
SCREAMING_SNAKE_CASE : Optional[int] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
SCREAMING_SNAKE_CASE : Optional[Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Optional[Any] , lowerCamelCase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCamelCase_ : Any ):
'''simple docstring'''
return super().__call__(lowerCamelCase_ , **lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = load_image(lowerCamelCase_ )
if prompt is not None:
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError(
f'''Received an invalid text input, got - {type(lowerCamelCase_ )} - but expected a single string. '''
"""Note also that one single text can be provided for conditional image to text generation.""" )
SCREAMING_SNAKE_CASE : Optional[int] = self.model.config.model_type
if model_type == "git":
SCREAMING_SNAKE_CASE : Dict = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework )
SCREAMING_SNAKE_CASE : str = self.tokenizer(text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ).input_ids
SCREAMING_SNAKE_CASE : Optional[int] = [self.tokenizer.cls_token_id] + input_ids
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
SCREAMING_SNAKE_CASE : int = self.image_processor(images=lowerCamelCase_ , header_text=lowerCamelCase_ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework )
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework )
model_inputs.update(lowerCamelCase_ )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
SCREAMING_SNAKE_CASE : Any = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
SCREAMING_SNAKE_CASE : Optional[Any] = None
return model_inputs
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any]=None ):
'''simple docstring'''
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , lowerCamelCase_ )
and all(x is None for x in model_inputs["""input_ids"""] )
):
SCREAMING_SNAKE_CASE : List[str] = None
if generate_kwargs is None:
SCREAMING_SNAKE_CASE : int = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
SCREAMING_SNAKE_CASE : Tuple = model_inputs.pop(self.model.main_input_name )
SCREAMING_SNAKE_CASE : Any = self.model.generate(lowerCamelCase_ , **lowerCamelCase_ , **lowerCamelCase_ )
return model_outputs
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for output_ids in model_outputs:
SCREAMING_SNAKE_CASE : List[Any] = {
"""generated_text""": self.tokenizer.decode(
lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , )
}
records.append(lowerCamelCase_ )
return records
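# Minimal usage sketch of the image-to-text pipeline above via the public
# `pipeline` factory; checkpoint and image URL are examples only.
from transformers import pipeline

captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))
# -> [{'generated_text': '...'}], matching the postprocess method above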
| 323 | 1 |
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=[] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = size[0] - overlap_pixels * 2
SCREAMING_SNAKE_CASE : Optional[Any] = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
SCREAMING_SNAKE_CASE : str = np.ones((size_y, size_x) , dtype=np.uinta ) * 2_55
SCREAMING_SNAKE_CASE : Any = np.pad(lowerCamelCase_ , mode="""linear_ramp""" , pad_width=lowerCamelCase_ , end_values=0 )
if "l" in remove_borders:
SCREAMING_SNAKE_CASE : List[Any] = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
SCREAMING_SNAKE_CASE : int = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
SCREAMING_SNAKE_CASE : Any = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
SCREAMING_SNAKE_CASE : str = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return max(lowerCamelCase_ , min(lowerCamelCase_ , lowerCamelCase_ ) )
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = list(lowerCamelCase_ )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
SCREAMING_SNAKE_CASE : Dict = clamp_rect(lowerCamelCase_ , [0, 0] , [image_size[0], image_size[1]] )
return rect
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = Image.new("""RGB""" , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(lowerCamelCase_ , (original_slice, 0) )
return result
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
SCREAMING_SNAKE_CASE : Any = tile.crop(lowerCamelCase_ )
return tile
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = n % d
return n - divisor
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase_ : AutoencoderKL , lowerCamelCase_ : CLIPTextModel , lowerCamelCase_ : CLIPTokenizer , lowerCamelCase_ : UNetaDConditionModel , lowerCamelCase_ : DDPMScheduler , lowerCamelCase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCamelCase_ : int = 3_50 , ):
'''simple docstring'''
super().__init__(
vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , unet=lowerCamelCase_ , low_res_scheduler=lowerCamelCase_ , scheduler=lowerCamelCase_ , max_noise_level=lowerCamelCase_ , )
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Any , lowerCamelCase_ : str , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
SCREAMING_SNAKE_CASE : Any = add_overlap_rect(lowerCamelCase_ , lowerCamelCase_ , image.size )
SCREAMING_SNAKE_CASE : Any = image.crop(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
SCREAMING_SNAKE_CASE : List[Any] = translated_slice_x - (original_image_slice / 2)
SCREAMING_SNAKE_CASE : Any = max(0 , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = squeeze_tile(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = to_input.size
SCREAMING_SNAKE_CASE : Tuple = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
SCREAMING_SNAKE_CASE : Optional[int] = super(lowerCamelCase_ , self ).__call__(image=lowerCamelCase_ , **lowerCamelCase_ ).images[0]
SCREAMING_SNAKE_CASE : Optional[int] = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
SCREAMING_SNAKE_CASE : Any = unsqueeze_tile(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
SCREAMING_SNAKE_CASE : Tuple = []
if x == 0:
remove_borders.append("""l""" )
elif crop_rect[2] == image.size[0]:
remove_borders.append("""r""" )
if y == 0:
remove_borders.append("""t""" )
elif crop_rect[3] == image.size[1]:
remove_borders.append("""b""" )
SCREAMING_SNAKE_CASE : int = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=lowerCamelCase_ ) , mode="""L""" , )
final_image.paste(
lowerCamelCase_ , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , lowerCamelCase_ )
@torch.no_grad()
def __call__( self : Dict , lowerCamelCase_ : Union[str, List[str]] , lowerCamelCase_ : Union[PIL.Image.Image, List[PIL.Image.Image]] , lowerCamelCase_ : int = 75 , lowerCamelCase_ : float = 9.0 , lowerCamelCase_ : int = 50 , lowerCamelCase_ : Optional[Union[str, List[str]]] = None , lowerCamelCase_ : Optional[int] = 1 , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : Optional[torch.Generator] = None , lowerCamelCase_ : Optional[torch.FloatTensor] = None , lowerCamelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ : int = 1 , lowerCamelCase_ : int = 1_28 , lowerCamelCase_ : int = 32 , lowerCamelCase_ : int = 32 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = Image.new("""RGB""" , (image.size[0] * 4, image.size[1] * 4) )
SCREAMING_SNAKE_CASE : Optional[Any] = math.ceil(image.size[0] / tile_size )
SCREAMING_SNAKE_CASE : Any = math.ceil(image.size[1] / tile_size )
SCREAMING_SNAKE_CASE : Tuple = tcx * tcy
SCREAMING_SNAKE_CASE : str = 0
for y in range(lowerCamelCase_ ):
for x in range(lowerCamelCase_ ):
self._process_tile(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , prompt=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , noise_level=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , )
current_count += 1
if callback is not None:
callback({"""progress""": current_count / total_tile_count, """image""": final_image} )
return final_image
def __A ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = """stabilityai/stable-diffusion-x4-upscaler"""
SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionTiledUpscalePipeline.from_pretrained(lowerCamelCase_ , revision="""fp16""" , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : Optional[int] = pipe.to("""cuda""" )
SCREAMING_SNAKE_CASE : Optional[Any] = Image.open("""../../docs/source/imgs/diffusers_library.jpg""" )
def callback(lowerCamelCase_ ):
print(f'''progress: {obj["progress"]:.4f}''' )
obj["image"].save("""diffusers_library_progress.jpg""" )
SCREAMING_SNAKE_CASE : Any = pipe(image=lowerCamelCase_ , prompt="""Black font, white background, vector""" , noise_level=40 , callback=lowerCamelCase_ )
final_image.save("""diffusers_library.jpg""" )
if __name__ == "__main__":
main()
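# Standalone sketch of the blending idea behind make_transparency_mask above:
# each tile's alpha ramps linearly from 0 at the overlap border to 255 inside,
# so neighbouring tiles cross-fade instead of leaving visible seams.
import numpy as np

tile_size, overlap = 8, 2
mask = np.ones((tile_size - 2 * overlap,) * 2, dtype=np.uint8) * 255
mask = np.pad(mask, mode="linear_ramp", pad_width=overlap, end_values=0)
print(mask)  # 0 at the edges, 255 in the centre, linear ramp in between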
| 323 |
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (CMStochasticIterativeScheduler,)
SCREAMING_SNAKE_CASE__ = 10
def lowerCamelCase_ ( self : List[str] , **lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = {
"""num_train_timesteps""": 2_01,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
config.update(**lowerCamelCase_ )
return config
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = 10
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0](**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = scheduler.timesteps[0]
SCREAMING_SNAKE_CASE : Dict = scheduler.timesteps[1]
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample
SCREAMING_SNAKE_CASE : List[str] = 0.1 * sample
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = 1
scheduler.set_timesteps(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = scheduler.timesteps
SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(lowerCamelCase_ ):
# 1. scale model input
SCREAMING_SNAKE_CASE : Optional[int] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict noise residual
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , lowerCamelCase_ )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE : List[str] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = pred_prev_sample
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 192.7_614 ) < 1e-2
assert abs(result_mean.item() - 0.2_510 ) < 1e-3
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = [1_06, 0]
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = scheduler.timesteps
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict noise residual
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , lowerCamelCase_ )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE : str = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Dict = pred_prev_sample
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 347.6_357 ) < 1e-2
assert abs(result_mean.item() - 0.4_527 ) < 1e-3
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [39, 30, 12, 15, 0]
with self.assertRaises(lowerCamelCase_ , msg="""`timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = [39, 30, 12, 1, 0]
SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ )
with self.assertRaises(lowerCamelCase_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=lowerCamelCase_ , timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Any = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            lowerCamelCase_ , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
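# Minimal sketch of the sampling loop these tests exercise, with custom
# descending timesteps; the identity "model" is a placeholder assumption.
import torch
from diffusers import CMStochasticIterativeScheduler

scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
scheduler.set_timesteps(timesteps=[106, 0])  # must be strictly descending
sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    model_output = model_input  # stand-in for a consistency model call
    sample = scheduler.step(model_output, t, sample).prev_sample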
| 323 | 1 |
'''simple docstring'''
from math import pi, sqrt, tan
def __A ( lowerCamelCase_ ):
"""simple docstring"""
if side_length < 0:
raise ValueError("""surface_area_cube() only accepts non-negative values""" )
return 6 * side_length**2
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if length < 0 or breadth < 0 or height < 0:
raise ValueError("""surface_area_cuboid() only accepts non-negative values""" )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def __A ( lowerCamelCase_ ):
"""simple docstring"""
if radius < 0:
raise ValueError("""surface_area_sphere() only accepts non-negative values""" )
return 4 * pi * radius**2
def __A ( lowerCamelCase_ ):
"""simple docstring"""
if radius < 0:
raise ValueError("""surface_area_hemisphere() only accepts non-negative values""" )
return 3 * pi * radius**2
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError("""surface_area_cone() only accepts non-negative values""" )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
"""surface_area_conical_frustum() only accepts non-negative values""" )
SCREAMING_SNAKE_CASE : List[str] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError("""surface_area_cylinder() only accepts non-negative values""" )
return 2 * pi * radius * (height + radius)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if torus_radius < 0 or tube_radius < 0:
raise ValueError("""surface_area_torus() only accepts non-negative values""" )
if torus_radius < tube_radius:
raise ValueError(
"""surface_area_torus() does not support spindle or self intersecting tori""" )
return 4 * pow(lowerCamelCase_ , 2 ) * torus_radius * tube_radius
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if length < 0 or width < 0:
raise ValueError("""area_rectangle() only accepts non-negative values""" )
return length * width
def __A ( lowerCamelCase_ ):
"""simple docstring"""
if side_length < 0:
raise ValueError("""area_square() only accepts non-negative values""" )
return side_length**2
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError("""area_triangle() only accepts non-negative values""" )
return (base * height) / 2
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError("""area_triangle_three_sides() only accepts non-negative values""" )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError("""Given three sides do not form a triangle""" )
SCREAMING_SNAKE_CASE : Dict = (sidea + sidea + sidea) / 2
SCREAMING_SNAKE_CASE : Any = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError("""area_parallelogram() only accepts non-negative values""" )
return base * height
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if basea < 0 or basea < 0 or height < 0:
raise ValueError("""area_trapezium() only accepts non-negative values""" )
return 1 / 2 * (basea + basea) * height
def __A ( lowerCamelCase_ ):
"""simple docstring"""
if radius < 0:
raise ValueError("""area_circle() only accepts non-negative values""" )
return pi * radius**2
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if radius_x < 0 or radius_y < 0:
raise ValueError("""area_ellipse() only accepts non-negative values""" )
return pi * radius_x * radius_y
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError("""area_rhombus() only accepts non-negative values""" )
return 1 / 2 * diagonal_a * diagonal_a
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) or sides < 3:
raise ValueError(
"""area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides""" )
elif length < 0:
raise ValueError(
"""area_reg_polygon() only accepts non-negative values as \
length of a side""" )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("""[DEMO] Areas of various geometric shapes: \n""")
print(f'''Rectangle: {area_rectangle(10, 20) = }''')
print(f'''Square: {area_square(10) = }''')
print(f'''Triangle: {area_triangle(10, 10) = }''')
print(f'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(f'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(f'''Rhombus: {area_rhombus(10, 20) = }''')
print(f'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(f'''Circle: {area_circle(20) = }''')
print(f'''Ellipse: {area_ellipse(10, 20) = }''')
print("""\nSurface Areas of various geometric shapes: \n""")
print(f'''Cube: {surface_area_cube(20) = }''')
print(f'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(f'''Sphere: {surface_area_sphere(20) = }''')
print(f'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(f'''Cone: {surface_area_cone(10, 20) = }''')
print(f'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(f'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(f'''Torus: {surface_area_torus(20, 10) = }''')
print(f'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(f'''Square: {area_reg_polygon(4, 10) = }''')
    print(f'''Regular Pentagon: {area_reg_polygon(5, 10) = }''')
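    # Illustrative cross-check: Heron's formula on a 3-4-5 right triangle agrees
    # exactly with base * height / 2 -- both return 6.0.
    assert area_triangle_three_sides(3, 4, 5) == area_triangle(3, 4)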
| 323 |
'''simple docstring'''
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : NestedDataStructureLike[PathLike] , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
lowerCamelCase_ , split=lowerCamelCase_ , features=lowerCamelCase_ , cache_dir=lowerCamelCase_ , keep_in_memory=lowerCamelCase_ , streaming=lowerCamelCase_ , num_proc=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : int = path_or_paths if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else {self.split: path_or_paths}
SCREAMING_SNAKE_CASE : Optional[int] = Text(
cache_dir=lowerCamelCase_ , data_files=lowerCamelCase_ , features=lowerCamelCase_ , **lowerCamelCase_ , )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
if self.streaming:
SCREAMING_SNAKE_CASE : int = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : List[str] = None
self.builder.download_and_prepare(
download_config=lowerCamelCase_ , download_mode=lowerCamelCase_ , verification_mode=lowerCamelCase_ , base_path=lowerCamelCase_ , num_proc=self.num_proc , )
SCREAMING_SNAKE_CASE : int = self.builder.as_dataset(
split=self.split , verification_mode=lowerCamelCase_ , in_memory=self.keep_in_memory )
return dataset
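# Minimal usage sketch: this reader backs `load_dataset("text", ...)`; the file
# path is only an example.
from datasets import load_dataset

ds = load_dataset("text", data_files={"train": "my_corpus.txt"})["train"]
print(ds[0]["text"])  # one line of the input file per example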
| 323 | 1 |
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def _a ( a :Optional[Any] , a :int , a :List[str] , a :List[str] ) -> Tuple:
a = s.rsplit(a , a )
return new.join(a )
def _a ( a :Any ) -> List[Any]:
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() )
def _a ( a :Any ) -> List[Any]:
a = {}
a = ['''group_1''', '''group_2''', '''group_3''', '''group_4''']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
a = key.replace(F"""{group_key}.""" , F"""{group_key}.group.""" )
if "res_path" in key:
a = key.replace('''res_path.''' , '''res_path.path.''' )
if key.endswith('''.w''' ):
a = rreplace(a , '''.w''' , '''.weight''' , 1 )
if key.endswith('''.b''' ):
a = rreplace(a , '''.b''' , '''.bias''' , 1 )
a = value.float()
return upgrade
@torch.no_grad()
def _a ( a :Union[str, Any] , a :Optional[Any] , a :Optional[int]=None , a :str=True ) -> Tuple:
from dall_e import Encoder
a = Encoder()
if os.path.exists(a ):
a = torch.load(a )
else:
a = torch.hub.load_state_dict_from_url(a )
if isinstance(a , a ):
a = ckpt.state_dict()
encoder.load_state_dict(a )
if config_path is not None:
a = FlavaImageCodebookConfig.from_pretrained(a )
else:
a = FlavaImageCodebookConfig()
a = FlavaImageCodebook(a ).eval()
a = encoder.state_dict()
a = upgrade_state_dict(a )
hf_model.load_state_dict(a )
a = hf_model.state_dict()
a = count_parameters(a )
a = count_parameters(a )
assert torch.allclose(a , a , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(a )
else:
return hf_state_dict
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
UpperCAmelCase__ = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
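# Illustrative trace of the key-upgrade logic above (de-obfuscated): a DALL-E
# encoder key is rewritten step by step into the FlavaImageCodebook naming scheme.
key = "group_1.res_path.0.w"
key = key.replace("group_1.", "group_1.group.")
key = key.replace("res_path.", "res_path.path.")
if key.endswith(".w"):
    key = key[: -len(".w")] + ".weight"
print(key)  # -> group_1.group.res_path.path.0.weight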
| 0 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = 3_84
SCREAMING_SNAKE_CASE : Union[str, Any] = 7
if "tiny" in model_name:
SCREAMING_SNAKE_CASE : List[str] = 96
SCREAMING_SNAKE_CASE : List[str] = (2, 2, 6, 2)
SCREAMING_SNAKE_CASE : List[Any] = (3, 6, 12, 24)
elif "small" in model_name:
SCREAMING_SNAKE_CASE : Any = 96
SCREAMING_SNAKE_CASE : List[str] = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : int = (3, 6, 12, 24)
elif "base" in model_name:
SCREAMING_SNAKE_CASE : int = 1_28
SCREAMING_SNAKE_CASE : Any = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : int = (4, 8, 16, 32)
SCREAMING_SNAKE_CASE : Optional[Any] = 12
SCREAMING_SNAKE_CASE : str = 5_12
elif "large" in model_name:
SCREAMING_SNAKE_CASE : Tuple = 1_92
SCREAMING_SNAKE_CASE : Tuple = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : List[str] = (6, 12, 24, 48)
SCREAMING_SNAKE_CASE : Tuple = 12
SCREAMING_SNAKE_CASE : Union[str, Any] = 7_68
# set label information
SCREAMING_SNAKE_CASE : List[str] = 1_50
SCREAMING_SNAKE_CASE : Optional[Any] = """huggingface/label-files"""
SCREAMING_SNAKE_CASE : List[str] = """ade20k-id2label.json"""
SCREAMING_SNAKE_CASE : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) )
SCREAMING_SNAKE_CASE : str = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Optional[Any] = SwinConfig(
embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , num_heads=lowerCamelCase_ , window_size=lowerCamelCase_ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
SCREAMING_SNAKE_CASE : List[str] = UperNetConfig(
backbone_config=lowerCamelCase_ , auxiliary_in_channels=lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , )
return config
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = dct.pop(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = val
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
SCREAMING_SNAKE_CASE : Dict = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE : int = in_proj_weight[:dim, :]
SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[: dim]
SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE : Any = in_proj_bias[
dim : dim * 2
]
SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[
-dim :, :
]
SCREAMING_SNAKE_CASE : str = in_proj_bias[-dim :]
# fmt: on
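# Standalone sanity sketch for the split above: a fused (3*dim, dim) qkv projection is
# sliced into query/key/value thirds, so concatenating the slices must reproduce the
# fused tensors. The dimension below is a hypothetical stage width, not read from any
# checkpoint.
def _check_qkv_split(dim=96):
    fused_weight = torch.randn(3 * dim, dim)
    fused_bias = torch.randn(3 * dim)
    q_w, k_w, v_w = fused_weight[:dim], fused_weight[dim : dim * 2], fused_weight[-dim:]
    q_b, k_b, v_b = fused_bias[:dim], fused_bias[dim : dim * 2], fused_bias[-dim:]
    assert torch.equal(torch.cat([q_w, k_w, v_w]), fused_weight)
    assert torch.equal(torch.cat([q_b, k_b, v_b]), fused_bias)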
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = x.shape
SCREAMING_SNAKE_CASE : Any = x.reshape(lowerCamelCase_ , 4 , in_channel // 4 )
SCREAMING_SNAKE_CASE : Any = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = x.shape
SCREAMING_SNAKE_CASE : Dict = x.reshape(lowerCamelCase_ , in_channel // 4 , 4 )
SCREAMING_SNAKE_CASE : str = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = x.shape[0]
SCREAMING_SNAKE_CASE : List[str] = x.reshape(4 , in_channel // 4 )
SCREAMING_SNAKE_CASE : str = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = x.shape[0]
SCREAMING_SNAKE_CASE : Optional[int] = x.reshape(in_channel // 4 , 4 )
SCREAMING_SNAKE_CASE : str = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowerCamelCase_ )
return x
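# The four helpers above reorder Swin's patch-merging weights between the interleaved
# channel layout of the original checkpoint and the layout expected here. The
# permutation [0, 2, 1, 3] is its own inverse, so the "reverse" variants undo the
# forward ones exactly. Self-contained round-trip check (illustrative shapes only):
def _check_unfold_roundtrip(out_channel=8, in_channel=16):
    x = torch.randn(out_channel, in_channel)
    forward = x.reshape(out_channel, 4, in_channel // 4)[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    back = forward.reshape(out_channel, in_channel // 4, 4)[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    assert torch.equal(back, x)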
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
SCREAMING_SNAKE_CASE : List[str] = model_name_to_url[model_name]
SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location="""cpu""" , file_name=lowerCamelCase_ )[
"""state_dict"""
]
for name, param in state_dict.items():
print(lowerCamelCase_ , param.shape )
SCREAMING_SNAKE_CASE : Dict = get_upernet_config(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetForSemanticSegmentation(lowerCamelCase_ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(lowerCamelCase_ )
if "bn" in key:
SCREAMING_SNAKE_CASE : List[str] = key.replace("""bn""" , """batch_norm""" )
SCREAMING_SNAKE_CASE : Optional[Any] = val
# rename keys
SCREAMING_SNAKE_CASE : Union[str, Any] = create_rename_keys(lowerCamelCase_ )
for src, dest in rename_keys:
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
read_in_q_k_v(lowerCamelCase_ , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
SCREAMING_SNAKE_CASE : Tuple = reverse_correct_unfold_reduction_order(lowerCamelCase_ )
if "norm" in key:
SCREAMING_SNAKE_CASE : Optional[int] = reverse_correct_unfold_norm_order(lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
# verify on image
SCREAMING_SNAKE_CASE : Optional[int] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert("""RGB""" )
SCREAMING_SNAKE_CASE : Optional[int] = SegformerImageProcessor()
SCREAMING_SNAKE_CASE : str = processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = outputs.logits
print(logits.shape )
print("""First values of logits:""" , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] )
elif model_name == "upernet-swin-small":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] )
elif model_name == "upernet-swin-base":
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] )
elif model_name == "upernet-swin-large":
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase_ , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase_ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowerCamelCase_ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[f'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__UpperCAmelCase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
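# Example invocation (script filename and paths are illustrative):
#   python convert_upernet_swin_to_pytorch.py \
#       --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny \
#       --push_to_hub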
| 323 | 0 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: Tuple =logging.get_logger()
@dataclass
class __A :
a__ : nn.Module
a__ : List[nn.Module] = field(default_factory=UpperCamelCase__ )
a__ : list = field(default_factory=UpperCamelCase__ )
def _lowercase (self : List[Any] , __a : int , __a : Tensor , __a : Tensor ):
UpperCAmelCase_ = len(list(m.modules() ) ) == 1 or isinstance(__a , nn.Convad ) or isinstance(__a , nn.BatchNormad )
if has_not_submodules:
self.traced.append(__a )
def __call__(self : int , __a : Tensor ):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(__a )
[x.remove() for x in self.handles]
return self
@property
def _lowercase (self : Optional[Any] ):
# check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda __a : len(list(__a.state_dict().keys() ) ) > 0 , self.traced ) )
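# Minimal standalone sketch of the hook-based tracing idea used by `Tracker` above:
# register a forward hook on every submodule, record the leaf modules that fire during
# a forward pass, then keep only those with learnable state. (Toy helper, not the
# RegNet trunk.)
def _trace_parametrized_leaves(model: nn.Module, x: Tensor) -> list:
    traced = []
    def hook(module, inputs, output):
        if len(list(module.children())) == 0:  # leaf module
            traced.append(module)
    handles = [m.register_forward_hook(hook) for m in model.modules()]
    model(x)
    for handle in handles:
        handle.remove()
    return [m for m in traced if len(m.state_dict()) > 0]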
@dataclass
class __A :
a__ : nn.Module
a__ : nn.Module
a__ : int = 1
a__ : List = field(default_factory=UpperCamelCase__ )
a__ : List = field(default_factory=UpperCamelCase__ )
a__ : bool = True
def __call__(self : List[Any] , __a : Tensor ):
UpperCAmelCase_ = Tracker(self.dest )(__a ).parametrized
UpperCAmelCase_ = Tracker(self.src )(__a ).parametrized
UpperCAmelCase_ = list(filter(lambda __a : type(__a ) not in self.src_skip , __a ) )
UpperCAmelCase_ = list(filter(lambda __a : type(__a ) not in self.dest_skip , __a ) )
if len(__a ) != len(__a ) and self.raise_if_mismatch:
raise Exception(
f"""Numbers of operations are different. Source module has {len(__a )} operations while"""
f""" destination module has {len(__a )}.""" )
for dest_m, src_m in zip(__a , __a ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
                print(f"""Transferred from={src_m} to={dest_m}""" )
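# Usage sketch for `ModuleTransfer`: trace two architecturally-aligned models on the
# same dummy input, pair up their parametrized leaves one-to-one, and copy state
# across. (Toy layer sizes; the keyword names mirror the call site further below.)
#   src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
#   dest = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
#   ModuleTransfer(src=src, dest=dest)(torch.randn(1, 3, 32, 32))
#   assert torch.equal(src[0].weight, dest[0].weight)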
class __A ( nn.Module ):
def __init__(self : str , __a : nn.Module ):
super().__init__()
UpperCAmelCase_ = []
# - get the stem
feature_blocks.append(("conv1", model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith("block" ), f"""Unexpected layer name {k}"""
UpperCAmelCase_ = len(__a ) + 1
feature_blocks.append((f"""res{block_index}""", v) )
UpperCAmelCase_ = nn.ModuleDict(__a )
def _lowercase (self : Any , __a : Tensor ):
return get_trunk_forward_outputs(
__a , out_feat_keys=__a , feature_blocks=self._feature_blocks , )
class __A ( UpperCamelCase__ ):
def _lowercase (self : str , __a : str ):
UpperCAmelCase_ = x.split("-" )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__(self : Any , __a : str ):
# default to timm!
if x not in self:
UpperCAmelCase_ = self.convert_name_to_timm(__a )
UpperCAmelCase_ = partial(lambda: (timm.create_model(__a , pretrained=__a ).eval(), None) )
else:
UpperCAmelCase_ = super().__getitem__(__a )
return val
class __A ( UpperCamelCase__ ):
def __getitem__(self : List[Any] , __a : str ):
if "seer" in x and "in1k" not in x:
UpperCAmelCase_ = RegNetModel
else:
UpperCAmelCase_ = RegNetForImageClassification
return val
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : int , snake_case_ : List[Tuple[str, str]] ) -> Union[str, Any]:
'''simple docstring'''
for from_key, to_key in keys:
UpperCAmelCase_ = from_state_dict[from_key].clone()
print(f"""Copied key={from_key} to={to_key}""" )
return to_state_dict
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : Callable[[], nn.Module] , snake_case_ : Callable[[], nn.Module] , snake_case_ : RegNetConfig , snake_case_ : Path , snake_case_ : bool = True , ) -> int:
'''simple docstring'''
print(f"""Converting {name}...""" )
with torch.no_grad():
UpperCAmelCase_ , UpperCAmelCase_ = from_model_func()
UpperCAmelCase_ = our_model_func(snake_case_ ).eval()
UpperCAmelCase_ = ModuleTransfer(src=snake_case_ , dest=snake_case_ , raise_if_mismatch=snake_case_ )
UpperCAmelCase_ = torch.randn((1, 3, 2_24, 2_24) )
module_transfer(snake_case_ )
if from_state_dict is not None:
UpperCAmelCase_ = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
UpperCAmelCase_ = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
UpperCAmelCase_ = manually_copy_vissl_head(snake_case_ , our_model.state_dict() , snake_case_ )
our_model.load_state_dict(snake_case_ )
UpperCAmelCase_ = our_model(snake_case_ , output_hidden_states=snake_case_ )
UpperCAmelCase_ = (
our_outputs.logits if isinstance(snake_case_ , snake_case_ ) else our_outputs.last_hidden_state
)
UpperCAmelCase_ = from_model(snake_case_ )
UpperCAmelCase_ = from_output[-1] if type(snake_case_ ) is list else from_output
    # since we don't rely on any config files and the vissl seer model doesn't actually have a head, just check the last hidden state
if "seer" in name and "in1k" in name:
UpperCAmelCase_ = our_outputs.hidden_states[-1]
assert torch.allclose(snake_case_ , snake_case_ ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add model" , use_temp_dir=snake_case_ , )
UpperCAmelCase_ = 2_24 if "seer" not in name else 3_84
# we can use the convnext one
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" , size=snake_case_ )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add image processor" , use_temp_dir=snake_case_ , )
print(f"""Pushed {name}""" )
def lowerCAmelCase_ ( snake_case_ : Path , snake_case_ : str = None , snake_case_ : bool = True ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = "imagenet-1k-id2label.json"
UpperCAmelCase_ = 10_00
UpperCAmelCase_ = (1, num_labels)
UpperCAmelCase_ = "huggingface/label-files"
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = json.load(open(cached_download(hf_hub_url(snake_case_ , snake_case_ , repo_type="dataset" ) ) , "r" ) )
UpperCAmelCase_ = {int(snake_case_ ): v for k, v in idalabel.items()}
UpperCAmelCase_ = idalabel
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ = partial(snake_case_ , num_labels=snake_case_ , idalabel=snake_case_ , labelaid=snake_case_ )
UpperCAmelCase_ = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 1_60, 3_84] , groups_width=16 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 2_40, 5_28] , groups_width=24 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 1_28, 2_88, 6_72] , groups_width=16 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 1_68, 4_08, 9_12] , groups_width=24 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 1_92, 4_32, 10_08] , groups_width=48 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 2_40, 5_60, 13_60] , groups_width=40 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 3_92, 7_84, 16_24] , groups_width=56 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 2_40, 7_20, 19_20] , groups_width=1_20 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[2_56, 5_12, 8_96, 20_48] , groups_width=1_28 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[3_36, 6_72, 13_44, 25_20] , groups_width=1_68 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 1_04, 2_08, 4_40] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 1_12, 2_56, 6_08] , groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 1_28, 3_20, 7_68] , groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 1_20, 3_36, 8_88] , groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 2_16, 5_76, 15_12] , groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[1_28, 1_92, 5_12, 10_88] , groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[1_44, 2_88, 5_76, 12_96] , groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 4_48, 8_96, 20_16] , groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[2_24, 4_48, 12_32, 30_24] , groups_width=1_12 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
}
UpperCAmelCase_ = NameToOurModelFuncMap()
UpperCAmelCase_ = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(snake_case_ : str , snake_case_ : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
UpperCAmelCase_ = torch.hub.load_state_dict_from_url(snake_case_ , model_dir=str(snake_case_ ) , map_location="cpu" )
UpperCAmelCase_ = model_func()
# check if we have a head, if yes add it
UpperCAmelCase_ = files["classy_state_dict"]["base_model"]["model"]
UpperCAmelCase_ = model_state_dict["trunk"]
model.load_state_dict(snake_case_ )
return model.eval(), model_state_dict["heads"]
# pretrained
UpperCAmelCase_ = partial(
snake_case_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
UpperCAmelCase_ = partial(
snake_case_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
UpperCAmelCase_ = partial(
snake_case_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
UpperCAmelCase_ = partial(
snake_case_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch" , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=10_10 , w_0=17_44 , w_a=620.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
UpperCAmelCase_ = partial(
snake_case_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
UpperCAmelCase_ = partial(
snake_case_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
UpperCAmelCase_ = partial(
snake_case_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
UpperCAmelCase_ = partial(
snake_case_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch" , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=10_10 , w_0=17_44 , w_a=620.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
snake_case_ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , snake_case_ , snake_case_ , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
snake_case_ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , snake_case_ , snake_case_ , snake_case_ , )
return config, expected_shape
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
            'The name of the model you wish to convert; it must be one of the supported regnet* architectures,'
            ' currently: regnetx-*, regnety-*. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
SCREAMING_SNAKE_CASE_: List[str] =parser.parse_args()
SCREAMING_SNAKE_CASE_: Path =args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
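# Example invocation (paths are illustrative):
#   python convert_regnet_to_pytorch.py \
#       --model_name regnet-y-040 \
#       --pytorch_dump_folder_path ./regnet-dump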
| 1 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class UpperCamelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Dict[str, int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int = None , lowerCamelCase_ : int = None ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Any = pad_token_id
SCREAMING_SNAKE_CASE : List[Any] = max_length
SCREAMING_SNAKE_CASE : Optional[int] = vocab
SCREAMING_SNAKE_CASE : List[Any] = merges
SCREAMING_SNAKE_CASE : Tuple = BytePairTokenizer(lowerCamelCase_ , lowerCamelCase_ , sequence_length=lowerCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls : Any , lowerCamelCase_ : GPTaTokenizer , *lowerCamelCase_ : str , **lowerCamelCase_ : str ):
'''simple docstring'''
        SCREAMING_SNAKE_CASE : Tuple = [""" """.join(m ) for m in tokenizer.bpe_ranks.keys()]
SCREAMING_SNAKE_CASE : List[str] = tokenizer.get_vocab()
return cls(lowerCamelCase_ , lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls : List[Any] , lowerCamelCase_ : Union[str, os.PathLike] , *lowerCamelCase_ : str , **lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = GPTaTokenizer.from_pretrained(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
return cls.from_tokenizer(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls : List[str] , lowerCamelCase_ : Tuple ):
'''simple docstring'''
return cls(**lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : int = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tf_tokenizer(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = tf.ones_like(lowerCamelCase_ )
if self.pad_token_id is not None:
# pad the tokens up to max length
SCREAMING_SNAKE_CASE : Optional[int] = max_length if max_length is not None else self.max_length
if max_length is not None:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = pad_model_inputs(
lowerCamelCase_ , max_seq_length=lowerCamelCase_ , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
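# Usage sketch (model name illustrative; this layer mirrors the upstream
# `TFGPT2Tokenizer`): build the in-graph BPE tokenizer from a pretrained GPT-2
# tokenizer and call it on raw strings inside the TF graph.
#   tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
#   batch = tf_tokenizer(tf.constant(["hello world"]))
#   print(batch["input_ids"], batch["attention_mask"])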
| 323 | 0 |
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 2 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCamelCase_ : Optional[NestedDataStructureLike[PathLike]] = None , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Optional[Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = path_or_paths
SCREAMING_SNAKE_CASE : List[Any] = split if split or isinstance(lowerCamelCase_ , lowerCamelCase_ ) else """train"""
SCREAMING_SNAKE_CASE : List[Any] = features
SCREAMING_SNAKE_CASE : Union[str, Any] = cache_dir
SCREAMING_SNAKE_CASE : Dict = keep_in_memory
SCREAMING_SNAKE_CASE : Union[str, Any] = streaming
SCREAMING_SNAKE_CASE : Optional[int] = num_proc
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs
@abstractmethod
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
pass
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : str , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Tuple , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = features
SCREAMING_SNAKE_CASE : int = cache_dir
SCREAMING_SNAKE_CASE : Dict = keep_in_memory
SCREAMING_SNAKE_CASE : Tuple = streaming
SCREAMING_SNAKE_CASE : Union[str, Any] = num_proc
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs
@abstractmethod
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
pass
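# Sketch of a concrete subclass of the first base above (upstream name
# `AbstractDatasetReader`). Illustrative only: the attribute name is an assumption
# based on the constructor arguments, and real readers build a module-specific builder
# instead of parsing files by hand.
#   class JsonLinesReader(AbstractDatasetReader):
#       def read(self):
#           import json
#           rows = [json.loads(line) for line in open(self.path_or_paths, encoding="utf-8")]
#           return Dataset.from_list(rows)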
| 323 | 0 |
'''simple docstring'''
def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    if not isinstance(snake_case__ , int ):
        raise ValueError('''multiplicative_persistence() only accepts integral values''' )
    if snake_case__ < 0:
        raise ValueError('''multiplicative_persistence() does not accept negative values''' )
    steps = 0
    num_string = str(snake_case__ )
    while len(num_string ) != 1:
        numbers = [int(digit ) for digit in num_string]
        total = 1
        for i in range(0 , len(numbers ) ):
            total *= numbers[i]
        num_string = str(total )
        steps += 1
    return steps
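# Worked example: the multiplicative persistence of 39 is 3, since
#   39 -> 3 * 9 = 27 -> 2 * 7 = 14 -> 1 * 4 = 4
# takes three multiplication steps to reach a single digit.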
def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    if not isinstance(snake_case__ , int ):
        raise ValueError('''additive_persistence() only accepts integral values''' )
    if snake_case__ < 0:
        raise ValueError('''additive_persistence() does not accept negative values''' )
    steps = 0
    num_string = str(snake_case__ )
    while len(num_string ) != 1:
        numbers = [int(digit ) for digit in num_string]
        total = 0
        for i in range(0 , len(numbers ) ):
            total += numbers[i]
        num_string = str(total )
        steps += 1
    return steps
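# Worked example: the additive persistence of 199 is 3, since
#   199 -> 1 + 9 + 9 = 19 -> 1 + 9 = 10 -> 1 + 0 = 1
# takes three addition steps to reach a single digit.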
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = """ylacombe/bark-small"""
SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : str = """en_speaker_1"""
SCREAMING_SNAKE_CASE : Optional[int] = """This is a test string"""
SCREAMING_SNAKE_CASE : Optional[int] = """speaker_embeddings_path.json"""
SCREAMING_SNAKE_CASE : List[Any] = """speaker_embeddings"""
def lowerCamelCase_ ( self : int , **lowerCamelCase_ : int ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[str] = BarkProcessor(tokenizer=lowerCamelCase_ )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
SCREAMING_SNAKE_CASE : int = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
SCREAMING_SNAKE_CASE : List[str] = 35
SCREAMING_SNAKE_CASE : List[Any] = 2
SCREAMING_SNAKE_CASE : int = 8
SCREAMING_SNAKE_CASE : Optional[int] = {
"""semantic_prompt""": np.ones(lowerCamelCase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
SCREAMING_SNAKE_CASE : Tuple = processor(text=self.input_string , voice_preset=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = processor(text=self.input_string , voice_preset=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
SCREAMING_SNAKE_CASE : Optional[Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Any = BarkProcessor(tokenizer=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=self.input_string )
SCREAMING_SNAKE_CASE : Tuple = tokenizer(
self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 323 | 0 |
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
lowerCamelCase : Optional[int] = CpmAntTokenizer
lowerCamelCase : List[str] = False
def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
super().setUp()
lowerCAmelCase = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
@tooslow
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
lowerCAmelCase = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b' )
lowerCAmelCase = '今天天气真好!'
lowerCAmelCase = ['今天', '天气', '真', '好', '!']
lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = '今天天气真好!'
lowerCAmelCase = [tokenizer.bos_token] + tokens
lowerCAmelCase = [6, 9_8_0_2, 1_4_9_6_2, 2_0_8_2, 8_3_1, 2_4_4]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )
lowerCAmelCase = tokenizer.decode(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 4 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCAmelCase = logging.getLogger(__name__)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return (preds == labels).mean()
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
SCREAMING_SNAKE_CASE__ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def __A ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase_ )
# Set seed
set_seed(training_args.seed )
try:
SCREAMING_SNAKE_CASE : Dict = processors[data_args.task_name]()
SCREAMING_SNAKE_CASE : Optional[int] = processor.get_labels()
SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase_ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , )
# Get datasets
SCREAMING_SNAKE_CASE : Optional[Any] = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
SCREAMING_SNAKE_CASE : Dict = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(lowerCamelCase_ ) -> Dict:
SCREAMING_SNAKE_CASE : str = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(lowerCamelCase_ , p.label_ids )}
# Data collator
SCREAMING_SNAKE_CASE : List[Any] = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
SCREAMING_SNAKE_CASE : Any = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , compute_metrics=lowerCamelCase_ , data_collator=lowerCamelCase_ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE : Optional[Any] = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
SCREAMING_SNAKE_CASE : Optional[Any] = trainer.evaluate()
SCREAMING_SNAKE_CASE : str = os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_master():
with open(lowerCamelCase_ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , lowerCamelCase_ , lowerCamelCase_ )
writer.write("""%s = %s\n""" % (key, value) )
results.update(lowerCamelCase_ )
return results
def __A ( lowerCamelCase_ ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
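# Example invocation (script filename, task name and paths are illustrative):
#   python run_multiple_choice.py \
#       --task_name swag --model_name_or_path bert-base-uncased \
#       --data_dir ./swag --output_dir ./swag_out \
#       --do_train --do_eval --max_seq_length 128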
| 323 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase__ = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
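# The `_LazyModule` registered above defers the heavy framework imports until an
# attribute is first accessed. The core mechanism is module-level __getattr__
# (PEP 562); a minimal standalone sketch of the same idea:
#   import importlib
#   _LAZY = {"XLMModel": ".modeling_xlm"}
#   def __getattr__(name):
#       if name in _LAZY:
#           return getattr(importlib.import_module(_LAZY[name], __package__), name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")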
| 5 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Any=("DownEncoderBlock2D",) , lowerCamelCase_ : List[Any]=(64,) , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : List[Any]="silu" , lowerCamelCase_ : Optional[int]=True , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[int] = layers_per_block
SCREAMING_SNAKE_CASE : int = torch.nn.Convad(
lowerCamelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList([] )
# down
SCREAMING_SNAKE_CASE : Tuple = block_out_channels[0]
for i, down_block_type in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Any = output_channel
SCREAMING_SNAKE_CASE : List[str] = block_out_channels[i]
SCREAMING_SNAKE_CASE : Union[str, Any] = i == len(lowerCamelCase_ ) - 1
SCREAMING_SNAKE_CASE : Optional[Any] = get_down_block(
lowerCamelCase_ , num_layers=self.layers_per_block , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
self.down_blocks.append(lowerCamelCase_ )
# mid
SCREAMING_SNAKE_CASE : Union[str, Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
# out
SCREAMING_SNAKE_CASE : List[Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCamelCase_ , eps=1e-6 )
SCREAMING_SNAKE_CASE : List[Any] = nn.SiLU()
SCREAMING_SNAKE_CASE : Dict = 2 * out_channels if double_z else out_channels
SCREAMING_SNAKE_CASE : List[Any] = nn.Convad(block_out_channels[-1] , lowerCamelCase_ , 3 , padding=1 )
SCREAMING_SNAKE_CASE : Tuple = False
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = x
SCREAMING_SNAKE_CASE : int = self.conv_in(lowerCamelCase_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase_ : List[Any] ):
def custom_forward(*lowerCamelCase_ : List[str] ):
return module(*lowerCamelCase_ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
else:
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCamelCase_ )
else:
# down
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : Tuple = down_block(lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : List[Any] = self.mid_block(lowerCamelCase_ )
# post-process
SCREAMING_SNAKE_CASE : Optional[Any] = self.conv_norm_out(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = self.conv_act(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self.conv_out(lowerCamelCase_ )
return sample
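# During training the encoder above wraps each block in torch.utils.checkpoint so
# activations are recomputed in the backward pass instead of stored. Minimal sketch of
# the same pattern (toy helper; `use_reentrant=False` needs PyTorch >= 1.11, matching
# the version check above):
def _checkpointed(block, x):
    def run(*inputs):
        return block(*inputs)
    return torch.utils.checkpoint.checkpoint(run, x, use_reentrant=False)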
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : str=("UpDecoderBlock2D",) , lowerCamelCase_ : Union[str, Any]=(64,) , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : Dict="silu" , lowerCamelCase_ : Any="group" , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : int = layers_per_block
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Convad(
lowerCamelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Any = nn.ModuleList([] )
SCREAMING_SNAKE_CASE : str = in_channels if norm_type == """spatial""" else None
# mid
SCREAMING_SNAKE_CASE : Dict = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
# up
SCREAMING_SNAKE_CASE : Union[str, Any] = list(reversed(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Any = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = output_channel
SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i]
SCREAMING_SNAKE_CASE : List[str] = i == len(lowerCamelCase_ ) - 1
SCREAMING_SNAKE_CASE : List[Any] = get_up_block(
lowerCamelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , prev_output_channel=lowerCamelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , resnet_time_scale_shift=lowerCamelCase_ , )
self.up_blocks.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = output_channel
# out
if norm_type == "spatial":
SCREAMING_SNAKE_CASE : List[Any] = SpatialNorm(block_out_channels[0] , lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCamelCase_ , eps=1e-6 )
SCREAMING_SNAKE_CASE : Dict = nn.SiLU()
SCREAMING_SNAKE_CASE : str = nn.Convad(block_out_channels[0] , lowerCamelCase_ , 3 , padding=1 )
SCREAMING_SNAKE_CASE : Dict = False
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : str=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = z
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_in(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase_ : List[str] ):
def custom_forward(*lowerCamelCase_ : str ):
return module(*lowerCamelCase_ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
SCREAMING_SNAKE_CASE : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
else:
# middle
SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ )
else:
# middle
SCREAMING_SNAKE_CASE : Any = self.mid_block(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Any = up_block(lowerCamelCase_ , lowerCamelCase_ )
# post-process
if latent_embeds is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_norm_out(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_act(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.conv_out(lowerCamelCase_ )
return sample
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int=None , lowerCamelCase_ : Any="random" , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : List[Any]=True ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Tuple = n_e
SCREAMING_SNAKE_CASE : int = vq_embed_dim
SCREAMING_SNAKE_CASE : Tuple = beta
SCREAMING_SNAKE_CASE : Union[str, Any] = legacy
SCREAMING_SNAKE_CASE : int = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
SCREAMING_SNAKE_CASE : Optional[Any] = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
SCREAMING_SNAKE_CASE : Tuple = self.used.shape[0]
SCREAMING_SNAKE_CASE : Any = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
SCREAMING_SNAKE_CASE : Union[str, Any] = self.re_embed
SCREAMING_SNAKE_CASE : Any = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
SCREAMING_SNAKE_CASE : Optional[int] = n_e
SCREAMING_SNAKE_CASE : Any = sane_index_shape
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = inds.shape
assert len(lowerCamelCase_ ) > 1
SCREAMING_SNAKE_CASE : Tuple = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long()
SCREAMING_SNAKE_CASE : Union[str, Any] = match.argmax(-1 )
SCREAMING_SNAKE_CASE : Tuple = match.sum(2 ) < 1
if self.unknown_index == "random":
SCREAMING_SNAKE_CASE : Tuple = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
SCREAMING_SNAKE_CASE : Any = self.unknown_index
return new.reshape(lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = inds.shape
assert len(lowerCamelCase_ ) > 1
SCREAMING_SNAKE_CASE : str = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ )
if self.re_embed > self.used.shape[0]: # extra token
SCREAMING_SNAKE_CASE : List[Any] = 0 # simply set to zero
SCREAMING_SNAKE_CASE : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCamelCase_ )
return back.reshape(lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous()
SCREAMING_SNAKE_CASE : int = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
SCREAMING_SNAKE_CASE : Any = torch.argmin(torch.cdist(lowerCamelCase_ , self.embedding.weight ) , dim=1 )
SCREAMING_SNAKE_CASE : Tuple = self.embedding(lowerCamelCase_ ).view(z.shape )
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : List[str] = None
# compute loss for embedding
if not self.legacy:
SCREAMING_SNAKE_CASE : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
SCREAMING_SNAKE_CASE : Tuple = z + (z_q - z).detach()
# reshape back to match original input shape
SCREAMING_SNAKE_CASE : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE : List[Any] = self.remap_to_used(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ):
'''simple docstring'''
if self.remap is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE : List[Any] = self.unmap_to_all(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
SCREAMING_SNAKE_CASE : str = self.embedding(lowerCamelCase_ )
if shape is not None:
SCREAMING_SNAKE_CASE : List[str] = z_q.view(lowerCamelCase_ )
# reshape back to match original input shape
SCREAMING_SNAKE_CASE : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
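# A minimal sketch of the nearest-codebook lookup and straight-through estimator used
# in the forward pass above (illustrative only; plain torch, no remapping, and the
# tensor shapes are hypothetical):
#
#   codebook = torch.randn(16, 4)                 # n_e x vq_embed_dim
#   z = torch.randn(8, 4, requires_grad=True)     # flattened latents
#   idx = torch.cdist(z, codebook).argmin(dim=1)  # nearest code per latent
#   z_q = codebook[idx]
#   z_q = z + (z_q - z).detach()                  # gradients bypass the discrete lookup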
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int]=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = parameters
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = torch.chunk(lowerCamelCase_ , 2 , dim=1 )
SCREAMING_SNAKE_CASE : List[str] = torch.clamp(self.logvar , -30.0 , 20.0 )
SCREAMING_SNAKE_CASE : Dict = deterministic
SCREAMING_SNAKE_CASE : int = torch.exp(0.5 * self.logvar )
SCREAMING_SNAKE_CASE : Tuple = torch.exp(self.logvar )
if self.deterministic:
SCREAMING_SNAKE_CASE : List[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[torch.Generator] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = randn_tensor(
self.mean.shape , generator=lowerCamelCase_ , device=self.parameters.device , dtype=self.parameters.dtype )
SCREAMING_SNAKE_CASE : Optional[Any] = self.mean + self.std * sample
return x
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int=None ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
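    # Closed-form KL between diagonal Gaussians, matching the two branches above:
    #   KL(N(mu, s^2) || N(0, I))      = 0.5 * sum(mu^2 + s^2 - 1 - log s^2)
    #   KL(N(m1, s1^2) || N(m2, s2^2)) = 0.5 * sum((m1 - m2)^2 / s2^2 + s1^2 / s2^2 - 1 - log s1^2 + log s2^2)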
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=[1, 2, 3] ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
SCREAMING_SNAKE_CASE : List[Any] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCamelCase_ )
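    # Per-element Gaussian negative log-likelihood computed above:
    #   0.5 * (log(2*pi) + log var + (x - mean)^2 / var), summed over `dims`.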
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.mean
| 323 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class __A:
def __init__( self , _snake_case , _snake_case=12 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=32 , _snake_case=2 , _snake_case=4 , _snake_case=37 , _snake_case=0.1 , _snake_case=0.1 , _snake_case=512 , _snake_case=0.02 , _snake_case=0 , _snake_case=None , ) -> Dict:
'''simple docstring'''
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = projection_dim
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = dropout
__a = attention_dropout
__a = max_position_embeddings
__a = initializer_range
__a = scope
__a = bos_token_id
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
__a = input_mask.numpy()
__a , __a = input_mask.shape
__a = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_snake_case ):
__a = 1
__a = 0
__a = self.get_config()
return config, input_ids, tf.convert_to_tensor(_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Any:
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> Optional[Any]:
'''simple docstring'''
__a = TFBlipTextModel(config=_snake_case )
__a = model(_snake_case , attention_mask=_snake_case , training=_snake_case )
__a = model(_snake_case , training=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
__a = self.prepare_config_and_inputs()
__a , __a , __a = config_and_inputs
__a = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __A( a , unittest.TestCase ):
snake_case_ = (TFBlipTextModel,) if is_tf_available() else ()
snake_case_ = False
snake_case_ = False
snake_case_ = False
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = BlipTextModelTester(self )
__a = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
pass
@slow
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = TFBlipTextModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case=True ) -> Optional[Any]:
'''simple docstring'''
        super().test_pt_tf_model_equivalence(allow_missing_keys=_snake_case ) | 6 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaxAutoencoderKL
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 4
SCREAMING_SNAKE_CASE : str = 3
SCREAMING_SNAKE_CASE : List[Any] = (32, 32)
SCREAMING_SNAKE_CASE : Tuple = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE : Any = jax.random.uniform(lowerCamelCase_ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_input
return init_dict, inputs_dict
| 323 | 0 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate( *args , take_from: Optional[Union[Dict, Any]] = None , standard_warn: bool = True , stacklevel: int = 2 ) -> Optional[int]:
    '''simple docstring'''
    from .. import __version__
    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                f'The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''
                f' version {__version__} is >= {version_name}' )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = f'The `{attribute}` argument is deprecated and will be removed in version {version_name}.'
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = f'The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'
        elif deprecated_kwargs is None:
            warning = f'`{attribute}` is deprecated and will be removed in version {version_name}.'
        if warning is not None:
            warning = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(f'{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`' )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
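# Minimal usage sketch of the helper above (hypothetical call sites; `kwargs` stands
# for a caller's keyword-argument dict):
#
#   deprecate("old_arg", "0.22.0", "Use `new_arg` instead.")                   # warn only
#   value = deprecate("old_kwarg", "0.22.0", "Deprecated.", take_from=kwargs)  # pop + return the value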
| 7 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase__ :
"""simple docstring"""
@staticmethod
def lowerCamelCase_ ( *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : List[str] ):
'''simple docstring'''
pass
def __A ( lowerCamelCase_ ):
"""simple docstring"""
    m = hashlib.md5(lowerCamelCase_.tobytes() )  # fingerprint the raw image bytes
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = DepthEstimationPipeline(model=lowerCamelCase_ , image_processor=lowerCamelCase_ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} , lowerCamelCase_ )
import datasets
SCREAMING_SNAKE_CASE : List[str] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
SCREAMING_SNAKE_CASE : Any = depth_estimator(
[
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
] )
self.assertEqual(
[
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
] , lowerCamelCase_ , )
@require_tf
@unittest.skip("""Depth estimation is not implemented in TF""" )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
@slow
@require_torch
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = """Intel/dpt-large"""
SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline("""depth-estimation""" , model=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
SCREAMING_SNAKE_CASE : str = hashimage(outputs["""depth"""] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.662 )
@require_torch
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.skipTest("""There is not hf-internal-testing tiny model for either GLPN nor DPT""" )
| 323 | 0 |
from __future__ import annotations
def encode(plain: str) -> list[int]:
    return [ord(elem) - 96 for elem in plain]
def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)
def main() -> None:
    encoded = encode(input('''-> ''' ).strip().lower() )
    print('''Encoded: ''' , encoded )
    print('''Decoded:''' , decode(encoded ) )
if __name__ == "__main__":
    main()
| 8 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=13 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Dict=2_24 , lowerCamelCase_ : List[Any]=30 , lowerCamelCase_ : Union[str, Any]=4_00 , lowerCamelCase_ : str=True , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 18}
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : Any = num_channels
SCREAMING_SNAKE_CASE : str = image_size
SCREAMING_SNAKE_CASE : Dict = min_resolution
SCREAMING_SNAKE_CASE : List[str] = max_resolution
SCREAMING_SNAKE_CASE : str = do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size
SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize
SCREAMING_SNAKE_CASE : List[Any] = image_mean
SCREAMING_SNAKE_CASE : str = image_std
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ViTImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )
@property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : List[str] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Tuple = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : Any = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
| 323 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__lowerCAmelCase : str ='pt'
elif is_tf_available():
__lowerCAmelCase : Dict ='tf'
else:
__lowerCAmelCase : Dict ='jax'
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = PerceiverTokenizer
SCREAMING_SNAKE_CASE__ : List[Any] = False
def __magic_name__( self :Any ) -> str:
super().setUp()
__SCREAMING_SNAKE_CASE : int = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __magic_name__( self :str ) -> List[Any]:
return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )
def __magic_name__( self :List[Any] , **lowerCAmelCase__ :Optional[int] ) -> PerceiverTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __magic_name__( self :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Union[str, Any]=False , lowerCAmelCase__ :List[Any]=20 , lowerCAmelCase__ :str=5 ) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
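        # For example, a lone continuation byte is not valid UTF-8 on its own:
        # bytes([128]).decode("utf-8") raises UnicodeDecodeError.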
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
for i in range(len(lowerCAmelCase__ ) ):
try:
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=lowerCAmelCase__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__SCREAMING_SNAKE_CASE : Optional[int] = list(filter(lambda lowerCAmelCase__ : re.match(r'''^[ a-zA-Z]+$''' , t[1] ) , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = list(filter(lambda lowerCAmelCase__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowerCAmelCase__ ) , lowerCAmelCase__ ) )
if max_length is not None and len(lowerCAmelCase__ ) > max_length:
__SCREAMING_SNAKE_CASE : Dict = toks[:max_length]
if min_length is not None and len(lowerCAmelCase__ ) < min_length and len(lowerCAmelCase__ ) > 0:
while len(lowerCAmelCase__ ) < min_length:
__SCREAMING_SNAKE_CASE : List[Any] = toks + toks
# toks_str = [t[1] for t in toks]
__SCREAMING_SNAKE_CASE : Optional[int] = [t[0] for t in toks]
# Ensure consistency
__SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ )
if " " not in output_txt and len(lowerCAmelCase__ ) > 1:
__SCREAMING_SNAKE_CASE : Any = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowerCAmelCase__ )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowerCAmelCase__ )
)
if with_prefix_space:
__SCREAMING_SNAKE_CASE : List[str] = ''' ''' + output_txt
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
return output_txt, output_ids
def __magic_name__( self :Union[str, Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : Any = self.perceiver_tokenizer
__SCREAMING_SNAKE_CASE : str = '''Unicode €.'''
__SCREAMING_SNAKE_CASE : Any = tokenizer(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['''input_ids'''] , lowerCAmelCase__ )
# decoding
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.decode(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , '''[CLS]Unicode €.[SEP]''' )
__SCREAMING_SNAKE_CASE : List[str] = tokenizer('''e è é ê ë''' )
__SCREAMING_SNAKE_CASE : Tuple = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['''input_ids'''] , lowerCAmelCase__ )
# decoding
__SCREAMING_SNAKE_CASE : Any = tokenizer.decode(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , '''[CLS]e è é ê ë[SEP]''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' )
def __magic_name__( self :List[Any] ) -> int:
__SCREAMING_SNAKE_CASE : List[Any] = self.perceiver_tokenizer
__SCREAMING_SNAKE_CASE : Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
__SCREAMING_SNAKE_CASE : Any = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
__SCREAMING_SNAKE_CASE : Tuple = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
if FRAMEWORK != "jax":
__SCREAMING_SNAKE_CASE : Union[str, Any] = list(batch.input_ids.numpy()[0] )
else:
__SCREAMING_SNAKE_CASE : str = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[str] = self.perceiver_tokenizer
__SCREAMING_SNAKE_CASE : Optional[Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , lowerCAmelCase__ )
self.assertIn('''attention_mask''' , lowerCAmelCase__ )
self.assertNotIn('''decoder_input_ids''' , lowerCAmelCase__ )
self.assertNotIn('''decoder_attention_mask''' , lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> List[str]:
__SCREAMING_SNAKE_CASE : Dict = self.perceiver_tokenizer
__SCREAMING_SNAKE_CASE : int = [
'''Summary of the text.''',
'''Another summary.''',
]
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(
text_target=lowerCAmelCase__ , max_length=32 , padding='''max_length''' , truncation=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def __magic_name__( self :Union[str, Any] ) -> Dict:
# safety check on max_len default value so we are sure the test works
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__SCREAMING_SNAKE_CASE : Dict = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE : Tuple = ''' He is very happy, UNwant\u00E9d,running'''
__SCREAMING_SNAKE_CASE : Any = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
tokenizer.save_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = tokenizer.__class__.from_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = after_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
shutil.rmtree(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__SCREAMING_SNAKE_CASE : int = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE : List[str] = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
__SCREAMING_SNAKE_CASE : Dict = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
tokenizer.save_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.__class__.from_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = after_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.__class__.from_pretrained(lowerCAmelCase__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
__SCREAMING_SNAKE_CASE : Tuple = json.load(lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
__SCREAMING_SNAKE_CASE : int = json.load(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = [f'''<extra_id_{i}>''' for i in range(125 )]
__SCREAMING_SNAKE_CASE : List[Any] = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
__SCREAMING_SNAKE_CASE : Tuple = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(lowerCAmelCase__ , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_class.from_pretrained(
lowerCAmelCase__ , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__SCREAMING_SNAKE_CASE : Any = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=lowerCAmelCase__ )]
__SCREAMING_SNAKE_CASE : Any = tokenizer_class.from_pretrained(
lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def __magic_name__( self :Optional[Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '''�''' )
def __magic_name__( self :Any ) -> Dict:
pass
def __magic_name__( self :Optional[int] ) -> List[Any]:
pass
def __magic_name__( self :Any ) -> str:
pass
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
pass
def __magic_name__( self :Union[str, Any] ) -> str:
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
__SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizers(fast=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__SCREAMING_SNAKE_CASE : int = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
__SCREAMING_SNAKE_CASE : Any = tokenizer.convert_tokens_to_string(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
| 9 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 323 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = ["input_ids", "attention_mask"]
def __init__(self : Dict , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Union[str, Any]="<|endoftext|>" , UpperCAmelCase_ : Dict="<|endoftext|>" , UpperCAmelCase_ : Any="<|endoftext|>" , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : Dict , ) ->Any:
'''simple docstring'''
super().__init__(
UpperCAmelCase_ , UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , **UpperCAmelCase_ , )
lowerCamelCase__: List[str] =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get("add_prefix_space" , UpperCAmelCase_) != add_prefix_space:
lowerCamelCase__: Union[str, Any] =getattr(UpperCAmelCase_ , pre_tok_state.pop("type"))
lowerCamelCase__: Any =add_prefix_space
lowerCamelCase__: Optional[int] =pre_tok_class(**UpperCAmelCase_)
lowerCamelCase__: Optional[int] =add_prefix_space
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
lowerCamelCase__: List[Any] =self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_)
return tuple(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : "Conversation") ->List[int]:
'''simple docstring'''
lowerCamelCase__: Optional[int] =[]
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_) + [self.eos_token_id])
if len(UpperCAmelCase_) > self.model_max_length:
lowerCamelCase__: List[str] =input_ids[-self.model_max_length :]
return input_ids
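# Rough shape of the conversation encoding above (hypothetical token ids, eos id = 0):
#   a user turn [12, 34] and a model turn [56] become [12, 34, 0, 56, 0],
#   after which only the last `model_max_length` ids are kept.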
| 10 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
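# As the deprecation message above states, the supported import path is now simply:
#   from diffusers import StableDiffusionControlNetPipeline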
| 323 | 0 |
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0) )
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort(input_list: list) -> list:
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)
    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(',')]
    print(iter_merge_sort(unsorted))
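# Example: iter_merge_sort([4, 1, 3, 2]) -> [1, 2, 3, 4]; the merge width p doubles
# on each pass (2, 4, ...) until the final merge covers the whole list.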
| 11 |
'''simple docstring'''
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)
def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)
def flip_bit(number: int, position: int) -> int:
    """Invert the bit at `position` of `number`."""
    return number ^ (1 << position)
def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is 1."""
    return ((number >> position) & 1) == 1
def get_bit(number: int, position: int) -> int:
    """Return the bit at `position` of `number` as 0 or 1."""
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
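# Usage examples for the helpers above:
#   set_bit(0b1101, 1)    -> 15 (0b1111)
#   clear_bit(0b1111, 1)  -> 13 (0b1101)
#   flip_bit(0b1101, 1)   -> 15 (0b1111)
#   is_bit_set(0b1010, 3) -> True
#   get_bit(0b1010, 0)    -> 0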
| 323 | 0 |
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    '''simple docstring'''
    # validate the shapes of the inputs before iterating
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f'Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}'
        raise ValueError(msg)
    if cols2 != 1:
        msg = f'Constant matrix must be nx1 but received {rows2}x{cols2}'
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            """Coefficient and constant matrices dimensions must be nxn and nx1 but """
            f'received {rows1}x{cols1} and {rows2}x{cols2}'
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            """Number of initial values must be equal to number of rows in coefficient """
            f'matrix but received {len(init_val)} and {rows1}'
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("""Iterations must be at least 1""")
    table = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val
    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    '''simple docstring'''
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("""Coefficient matrix is not strictly diagonally dominant""")
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
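# Example (strictly diagonally dominant system; values are illustrative only):
#   A = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
#   b = np.array([[2.0], [-6.0], [-4.0]])
#   jacobi_iteration_method(A, b, [0.0, 0.0, 0.0], iterations=25)  # converges toward the exact solution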
| 12 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str=14 , lowerCamelCase_ : Optional[Any]=7 , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=True , lowerCamelCase_ : str=False , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : int=99 , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : int=4 , lowerCamelCase_ : List[Any]=4 , lowerCamelCase_ : List[str]=4 , lowerCamelCase_ : Union[str, Any]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Union[str, Any]=0.1 , lowerCamelCase_ : List[str]=5_12 , lowerCamelCase_ : Union[str, Any]=0.02 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = parent
SCREAMING_SNAKE_CASE : Optional[int] = batch_size
SCREAMING_SNAKE_CASE : Any = seq_length
SCREAMING_SNAKE_CASE : List[str] = is_training
SCREAMING_SNAKE_CASE : Optional[int] = use_input_mask
SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : List[Any] = rotary_dim
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Dict = vocab_size - 1
SCREAMING_SNAKE_CASE : str = vocab_size - 1
SCREAMING_SNAKE_CASE : List[Any] = vocab_size - 1
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : List[str] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = config_and_inputs
SCREAMING_SNAKE_CASE : Tuple = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict ):
'''simple docstring'''
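        # Decode all but the last prompt token against a fresh cache, then feed the final
        # token with that cache; the logits must match a single full forward pass (diff < 1e-3).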
SCREAMING_SNAKE_CASE : Any = 20
SCREAMING_SNAKE_CASE : Any = model_class_name(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model.init_cache(input_ids.shape[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
SCREAMING_SNAKE_CASE : Optional[int] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE : Any = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
SCREAMING_SNAKE_CASE : str = model(
input_ids[:, -1:] , attention_mask=lowerCamelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = 20
SCREAMING_SNAKE_CASE : Dict = model_class_name(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
SCREAMING_SNAKE_CASE : str = model.init_cache(input_ids.shape[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE : Any = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
SCREAMING_SNAKE_CASE : Dict = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class UpperCamelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
SCREAMING_SNAKE_CASE__ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxGPTJModelTester(self )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
@tooslow
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=lowerCamelCase_ , truncation=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : Optional[Any] = model.config.eos_token_id
SCREAMING_SNAKE_CASE : str = jax.jit(model.generate )
SCREAMING_SNAKE_CASE : str = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences
SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@is_pt_flax_cross_test
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE : str = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape
SCREAMING_SNAKE_CASE : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Optional[int] = 1
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
SCREAMING_SNAKE_CASE : Optional[int] = pt_model_class(lowerCamelCase_ ).eval()
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = fx_state
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = pt_model(**lowerCamelCase_ ).to_tuple()
SCREAMING_SNAKE_CASE : Any = fx_model(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = model_class.from_pretrained(lowerCamelCase_ , from_pt=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = fx_model_loaded(**lowerCamelCase_ ).to_tuple()
self.assertEqual(
len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = pt_model_class(lowerCamelCase_ ).eval()
SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : List[Any] = load_flax_weights_in_pytorch_model(lowerCamelCase_ , fx_model.params )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
SCREAMING_SNAKE_CASE : Dict = 1
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : Tuple = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = pt_model(**lowerCamelCase_ ).to_tuple()
SCREAMING_SNAKE_CASE : Optional[Any] = fx_model(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = pt_model_class.from_pretrained(lowerCamelCase_ , from_flax=lowerCamelCase_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = pt_model_loaded(**lowerCamelCase_ ).to_tuple()
self.assertEqual(
len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" )
SCREAMING_SNAKE_CASE : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase_ )
| 323 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    """Checks that InstructBlipProcessor correctly wires together the image processor and both tokenizers."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list with a single random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        # Q-Former entries are exposed with a "qformer_" prefix
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
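    # Hedged usage sketch (not from the test file): with a real InstructBLIP checkpoint a
    # single processor call yields `input_ids` for the language model and
    # `qformer_input_ids` for the Q-Former:
    #
    #     processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
    #     inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
    #     sorted(inputs.keys())
    #     # ['attention_mask', 'input_ids', 'pixel_values', 'qformer_attention_mask', 'qformer_input_ids']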
| 13 |
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """GPT-2 based text decoder that autoregressively decodes captions from a (CLIP-sized) prefix embedding."""

    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,  # Start of GPT2 config args
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            # prepend zero labels over the prefix positions so the LM loss covers the full sequence
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)
    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam(
        self,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(tokens)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                # first step: fan out into `beam_size` hypotheses
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                # stopped beams keep their score; only token 0 stays available for them
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
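    # Hedged usage sketch (dimensions are illustrative, not prescriptive): with
    # `prefix_inner_dim == n_embd` both prefix projections collapse to `nn.Identity()`,
    # so CLIP-sized features can be decoded directly:
    #
    #     decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=768)
    #     features = torch.randn(2, 77, 768)  # e.g. CLIP text-encoder hidden states
    #     tokens, lengths = decoder.generate_captions(features, eos_token_id=50256, device="cpu")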
| 323 | 0 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        # the second acquire must have blocked for at least `timeout` seconds
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    # lock file names longer than the OS limit are shortened to <= 255 characters
    filename = "a" * 1_000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
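# Hedged usage sketch of the pattern exercised above: two handles on one lock file,
# where the second acquire times out while the first is held.
#
#     lock = FileLock("/tmp/demo.lock")
#     with lock.acquire(timeout=1):
#         ...  # critical section; another FileLock on the same path now raises Timeout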
| 14 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    """Configuration for the vision encoder of GIT (a CLIP-style ViT)."""

    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
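# Hedged usage sketch: composing the nested configs by hand (values are illustrative).
#
#     vision_config = GitVisionConfig(image_size=224, patch_size=16)
#     config = GitConfig(vision_config=vision_config.to_dict(), num_hidden_layers=6)
#     config.vision_config.image_size  # 224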
| 323 | 0 |
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :str = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
SCREAMING_SNAKE_CASE :Dict = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
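# Hedged worked example of what the "init" pattern above does:
#
#     pattern, repl = REPLACE_PATTERNS["init"]
#     code = '__version__ = "4.27.0.dev0"'
#     pattern.sub(repl.replace("VERSION", "4.27.0"), code)
#     # -> '__version__ = "4.27.0"\n'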
| 15 |
'''simple docstring'''
from manim import *
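# This Scene animates big-model inference with CPU offload: layer weights are shuttled
# between CPU and GPU by hooks as the input square advances through the model blocks.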
class BigModelInferenceScene(Scene):
    # NOTE: the class name, direction constants (UP/DOWN/LEFT/RIGHT) and colors
    # (BLUE/RED/ORANGE) below are best-effort reconstructions; the obfuscated
    # source dropped them and they should be treated as assumptions.
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []

        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)

            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr)
        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            f"Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1))

        input = Square(0.3)
        input.set_fill(RED, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))
        self.play(FadeOut(step_1))

        a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)

        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_2 = MarkupText(
            f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
        self.play(
            Write(a),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a, run_time=0.5), MoveToTarget(input, run_time=0.5), FadeIn(a_c, run_time=0.5), lag_ratio=0.2
            )
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7

                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]), MoveToTarget(model_cpu_arr[i + 1]),
                    )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7), MoveToTarget(model_cpu_arr[i + 1], run_time=0.7),
                    )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )

                self.play(MoveToTarget(model_cpu_arr[i]))

            a = a_c
            a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(a), FadeOut(a_c, run_time=0.5),
        )

        step_3 = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed.", font_size=24)
        step_3.move_to([2, 2, 0])
        self.play(Write(step_3, run_time=3), MoveToTarget(input))
        self.wait()
| 323 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_electra_fast'] = ['ElectraTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_electra'] = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_electra'] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_electra'] = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
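# The TYPE_CHECKING branch above gives static type checkers real imports, while at
# runtime _LazyModule defers each import until its attribute is first accessed, keeping
# the parent `transformers` import cheap even when optional torch/TF/Flax backends are absent.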
| 16 |
'''simple docstring'''
from __future__ import annotations
graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Returns the path from the source vertex to `target_vertex` as "v1->v2->...->vn";
        raises ValueError when no such path exists."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
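# Worked example on the graph above, starting from source "G": BFS discovers the layers
# {G} -> {C} -> {A, F} -> {B, E} -> {D}, so shortest_path("D") returns "G->C->A->B->D".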
if __name__ == "__main__":
__UpperCAmelCase = Graph(graph, """G""")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
| 323 | 0 |
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    # binary cross-entropy
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
_a = datasets.load_iris()
_a = iris.data[:, :2]
_a = (iris.target != 0) * 1
_a = 0.1
_a = logistic_reg(alpha, x, y, max_iterations=7_00_00)
print('theta: ', theta) # printing the theta i.e our weights vector
def _A ( UpperCamelCase_ : str) -> Tuple:
'''simple docstring'''
return sigmoid_function(
np.dot(UpperCamelCase_, UpperCamelCase_)) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
((_a) , (_a)) = (x[:, 0].min(), x[:, 0].max())
((_a) , (_a)) = (x[:, 1].min(), x[:, 1].max())
((_a) , (_a)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
_a = np.c_[xxa.ravel(), xxa.ravel()]
_a = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='black')
plt.legend()
plt.show()
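    # Note: the [0.5] contour drawn above is the decision boundary, i.e. the set of
    # points where sigmoid(x @ theta) == 0.5, equivalently x @ theta == 0.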
| 17 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                # the two frontiers met: stitch the half-paths together
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # each search is steered towards the other search's current node
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__UpperCAmelCase = (0, 0)
__UpperCAmelCase = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__UpperCAmelCase = time.time()
__UpperCAmelCase = AStar(init, goal)
__UpperCAmelCase = a_star.search()
__UpperCAmelCase = time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
__UpperCAmelCase = time.time()
__UpperCAmelCase = BidirectionalAStar(init, goal)
__UpperCAmelCase = time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
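    # Note on the HEURISTIC flag above: with unit-cost 4-connected moves, both Manhattan
    # (HEURISTIC == 1) and Euclidean distance are admissible, so A* remains optimal;
    # Manhattan is the tighter bound on this grid and typically expands fewer nodes.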
| 323 | 0 |
from __future__ import annotations
import requests
valid_terms = set(
'''approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports'''.split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch posts from a subreddit, optionally projecting onto the `wanted_data` fields."""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
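# Hedged hardening sketch (not in the original): back off and retry on HTTP 429.
#
#     import time
#     for attempt in range(3):
#         try:
#             data = get_subreddit_data("learnpython", wanted_data=["title"])
#             break
#         except requests.HTTPError:
#             time.sleep(2**attempt)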
| 18 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
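# Hedged sketch of how `width_coefficient` and `depth_divisor` are typically combined
# (the usual EfficientNet rounding rule; see the modeling file for the exact variant):
#
#     def round_filters(config: EfficientNetConfig, num_channels: int) -> int:
#         divisor = config.depth_divisor
#         num_channels *= config.width_coefficient
#         new_dim = max(divisor, int(num_channels + divisor / 2) // divisor * divisor)
#         if new_dim < 0.9 * num_channels:  # guard against rounding down by >10%
#             new_dim += divisor
#         return int(new_dim)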
| 323 | 0 |
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    """Solve an ODE numerically with the (forward) Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
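# Quick sanity check (a sketch, assuming the API above): for y' = y with y(0) = 1 the
# exact solution is e**x, so the estimate approaches e ≈ 2.71828 as step_size shrinks:
#
#     y = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)
#     print(y[-1])  # ~2.7169 with step 0.001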
if __name__ == "__main__":
import doctest
doctest.testmod()
| 19 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """Pipeline that predicts a caption for a given image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING)

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one")
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)
    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation.")

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs
    def _forward(self, model_inputs, generate_kwargs=None):
        # batched GIT inputs may arrive as a list of None input_ids; normalize to None
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
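# Hedged usage sketch via the pipeline factory (the checkpoint name is one commonly
# used in the docs; treat both it and the output as illustrative):
#
#     from transformers import pipeline
#     captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
#     captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
#     # [{'generated_text': 'two birds are standing next to each other'}]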
| 323 | 0 |
def solution( SCREAMING_SNAKE_CASE__ = 1_000_000 ) -> int:
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for inputa in range(2 , SCREAMING_SNAKE_CASE__ ):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number
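def _demo_solution():
    # Added illustration (name `_demo_solution` is not part of the original
    # module): memoisation keeps the search cheap. Among numbers below 14 the
    # longest Collatz chain starts at 9 (20 terms); the classic Project Euler
    # #14 answer for the default one-million bound is 837799.
    assert solution(14 ) == 9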
if __name__ == "__main__":
print(solution(int(input().strip())))
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (CMStochasticIterativeScheduler,)
SCREAMING_SNAKE_CASE__ = 10
def lowerCamelCase_ ( self : List[str] , **lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = {
"""num_train_timesteps""": 2_01,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
config.update(**lowerCamelCase_ )
return config
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = 10
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0](**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = scheduler.timesteps[0]
SCREAMING_SNAKE_CASE : Dict = scheduler.timesteps[1]
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample
SCREAMING_SNAKE_CASE : List[str] = 0.1 * sample
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = 1
scheduler.set_timesteps(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = scheduler.timesteps
SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(lowerCamelCase_ ):
# 1. scale model input
SCREAMING_SNAKE_CASE : Optional[int] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict noise residual
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , lowerCamelCase_ )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE : List[str] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = pred_prev_sample
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 192.7_614 ) < 1e-2
assert abs(result_mean.item() - 0.2_510 ) < 1e-3
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = [1_06, 0]
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = scheduler.timesteps
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict noise residual
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , lowerCamelCase_ )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE : str = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Dict = pred_prev_sample
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 347.6_357 ) < 1e-2
assert abs(result_mean.item() - 0.4_527 ) < 1e-3
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [39, 30, 12, 15, 0]
with self.assertRaises(lowerCamelCase_ , msg="""`timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = [39, 30, 12, 1, 0]
SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ )
with self.assertRaises(lowerCamelCase_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=lowerCamelCase_ , timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Any = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            lowerCamelCase_ , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
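def _demo_cm_scheduler():
    # Added sketch (the `_demo_*` name is illustrative, not part of the test
    # suite): driving the scheduler outside the harness with the same config
    # and explicit timesteps the tests above exercise.
    scheduler = CMStochasticIterativeScheduler(
        num_train_timesteps=2_01 , sigma_min=0.002 , sigma_max=80.0 )
    scheduler.set_timesteps(timesteps=[1_06, 0] )
    sample = torch.randn(1 , 3 , 32 , 32 ) * scheduler.init_noise_sigma
    return scheduler.scale_model_input(sample , scheduler.timesteps[0] ).shape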
def UpperCamelCase_( number , iterations ) -> str:
    if not isinstance(iterations , int ):
        raise ValueError('iterations must be defined as integers' )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError(
            'starting number must be an integer and be more than 0' )
if not iterations >= 1:
raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
_lowercase : Tuple = ''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
            out += str(number )
# print(out)
number += 1
out += " "
return out
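def _demo_fizz_buzz():
    # Added illustration: the first fifteen numbers give the familiar sequence
    # (note the trailing space the implementation appends after each entry).
    expected = '1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz '
    assert UpperCamelCase_(1 , 15 ) == expected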
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : NestedDataStructureLike[PathLike] , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
lowerCamelCase_ , split=lowerCamelCase_ , features=lowerCamelCase_ , cache_dir=lowerCamelCase_ , keep_in_memory=lowerCamelCase_ , streaming=lowerCamelCase_ , num_proc=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : int = path_or_paths if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else {self.split: path_or_paths}
SCREAMING_SNAKE_CASE : Optional[int] = Text(
cache_dir=lowerCamelCase_ , data_files=lowerCamelCase_ , features=lowerCamelCase_ , **lowerCamelCase_ , )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
if self.streaming:
SCREAMING_SNAKE_CASE : int = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : List[str] = None
self.builder.download_and_prepare(
download_config=lowerCamelCase_ , download_mode=lowerCamelCase_ , verification_mode=lowerCamelCase_ , base_path=lowerCamelCase_ , num_proc=self.num_proc , )
SCREAMING_SNAKE_CASE : int = self.builder.as_dataset(
split=self.split , verification_mode=lowerCamelCase_ , in_memory=self.keep_in_memory )
return dataset
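def _demo_text_reader():
    # Added sketch (the file name is a placeholder): `Dataset.from_text`
    # constructs this reader under the hood and invokes its read method.
    from datasets import Dataset

    return Dataset.from_text("""corpus.txt""" )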
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
__SCREAMING_SNAKE_CASE :Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
__SCREAMING_SNAKE_CASE :Optional[Any] = 256
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : str = ["""melgan"""]
def __init__( self : Any , snake_case_ : SpectrogramNotesEncoder , snake_case_ : SpectrogramContEncoder , snake_case_ : TaFilmDecoder , snake_case_ : DDPMScheduler , snake_case_ : OnnxRuntimeModel if is_onnx_available() else Any , ):
super().__init__()
# From MELGAN
_UpperCAmelCase = math.log(1e-5 ) # Matches MelGAN training.
_UpperCAmelCase = 4.0 # Largest value for most examples
_UpperCAmelCase = 1_2_8
self.register_modules(
notes_encoder=snake_case_ , continuous_encoder=snake_case_ , decoder=snake_case_ , scheduler=snake_case_ , melgan=snake_case_ , )
def lowercase ( self : List[str] , snake_case_ : Any , snake_case_ : int=(-1.0, 1.0) , snake_case_ : List[Any]=False ):
_UpperCAmelCase , _UpperCAmelCase = output_range
if clip:
_UpperCAmelCase = torch.clip(snake_case_ , self.min_value , self.max_value )
# Scale to [0, 1].
_UpperCAmelCase = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def lowercase ( self : List[Any] , snake_case_ : Dict , snake_case_ : Optional[Any]=(-1.0, 1.0) , snake_case_ : Optional[Any]=False ):
_UpperCAmelCase , _UpperCAmelCase = input_range
_UpperCAmelCase = torch.clip(snake_case_ , snake_case_ , snake_case_ ) if clip else outputs
# Scale to [0, 1].
_UpperCAmelCase = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def lowercase ( self : Dict , snake_case_ : Any , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] ):
_UpperCAmelCase = input_tokens > 0
_UpperCAmelCase , _UpperCAmelCase = self.notes_encoder(
encoder_input_tokens=snake_case_ , encoder_inputs_mask=snake_case_ )
_UpperCAmelCase , _UpperCAmelCase = self.continuous_encoder(
encoder_inputs=snake_case_ , encoder_inputs_mask=snake_case_ )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def lowercase ( self : Optional[Any] , snake_case_ : str , snake_case_ : Any , snake_case_ : str ):
_UpperCAmelCase = noise_time
if not torch.is_tensor(snake_case_ ):
_UpperCAmelCase = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(snake_case_ ) and len(timesteps.shape ) == 0:
_UpperCAmelCase = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_UpperCAmelCase = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
_UpperCAmelCase = self.decoder(
encodings_and_masks=snake_case_ , decoder_input_tokens=snake_case_ , decoder_noise_time=snake_case_ )
return logits
@torch.no_grad()
def __call__( self : List[str] , snake_case_ : List[List[int]] , snake_case_ : Optional[torch.Generator] = None , snake_case_ : int = 1_0_0 , snake_case_ : bool = True , snake_case_ : str = "numpy" , snake_case_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , snake_case_ : int = 1 , ):
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(snake_case_ , snake_case_ ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(snake_case_ )}.' )
_UpperCAmelCase = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
_UpperCAmelCase = np.zeros([1, 0, self.n_dims] , np.floataa )
_UpperCAmelCase = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=snake_case_ , device=self.device )
for i, encoder_input_tokens in enumerate(snake_case_ ):
if i == 0:
_UpperCAmelCase = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
_UpperCAmelCase = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=snake_case_ , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
_UpperCAmelCase = ones
_UpperCAmelCase = self.scale_features(
snake_case_ , output_range=[-1.0, 1.0] , clip=snake_case_ )
_UpperCAmelCase = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=snake_case_ , continuous_mask=snake_case_ , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
_UpperCAmelCase = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=snake_case_ , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(snake_case_ )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
_UpperCAmelCase = self.decode(
encodings_and_masks=snake_case_ , input_tokens=snake_case_ , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
_UpperCAmelCase = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample
_UpperCAmelCase = self.scale_to_features(snake_case_ , input_range=[-1.0, 1.0] )
_UpperCAmelCase = mel[:1]
_UpperCAmelCase = mel.cpu().float().numpy()
_UpperCAmelCase = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(snake_case_ , snake_case_ )
logger.info("Generated segment" , snake_case_ )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." )
if output_type == "numpy":
_UpperCAmelCase = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
_UpperCAmelCase = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=snake_case_ )
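def _demo_feature_scaling():
    # Added standalone check (not part of the pipeline) of the affine maps
    # implemented by `scale_features`/`scale_to_features` above: the two
    # directions are exact inverses of one another when no clipping occurs.
    min_value, max_value = math.log(1e-5 ), 4.0
    min_out, max_out = -1.0, 1.0
    x = np.linspace(min_value , max_value , 5 )
    zero_one = (x - min_value) / (max_value - min_value)
    y = zero_one * (max_out - min_out) + min_out
    back = (y - min_out) / (max_out - min_out) * (max_value - min_value) + min_value
    assert np.allclose(back , x )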
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = 3_84
SCREAMING_SNAKE_CASE : Union[str, Any] = 7
if "tiny" in model_name:
SCREAMING_SNAKE_CASE : List[str] = 96
SCREAMING_SNAKE_CASE : List[str] = (2, 2, 6, 2)
SCREAMING_SNAKE_CASE : List[Any] = (3, 6, 12, 24)
elif "small" in model_name:
SCREAMING_SNAKE_CASE : Any = 96
SCREAMING_SNAKE_CASE : List[str] = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : int = (3, 6, 12, 24)
elif "base" in model_name:
SCREAMING_SNAKE_CASE : int = 1_28
SCREAMING_SNAKE_CASE : Any = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : int = (4, 8, 16, 32)
SCREAMING_SNAKE_CASE : Optional[Any] = 12
SCREAMING_SNAKE_CASE : str = 5_12
elif "large" in model_name:
SCREAMING_SNAKE_CASE : Tuple = 1_92
SCREAMING_SNAKE_CASE : Tuple = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : List[str] = (6, 12, 24, 48)
SCREAMING_SNAKE_CASE : Tuple = 12
SCREAMING_SNAKE_CASE : Union[str, Any] = 7_68
# set label information
SCREAMING_SNAKE_CASE : List[str] = 1_50
SCREAMING_SNAKE_CASE : Optional[Any] = """huggingface/label-files"""
SCREAMING_SNAKE_CASE : List[str] = """ade20k-id2label.json"""
SCREAMING_SNAKE_CASE : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) )
SCREAMING_SNAKE_CASE : str = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Optional[Any] = SwinConfig(
embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , num_heads=lowerCamelCase_ , window_size=lowerCamelCase_ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
SCREAMING_SNAKE_CASE : List[str] = UperNetConfig(
backbone_config=lowerCamelCase_ , auxiliary_in_channels=lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , )
return config
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = dct.pop(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = val
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
SCREAMING_SNAKE_CASE : Dict = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE : int = in_proj_weight[:dim, :]
SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[: dim]
SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE : Any = in_proj_bias[
dim : dim * 2
]
SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[
-dim :, :
]
SCREAMING_SNAKE_CASE : str = in_proj_bias[-dim :]
# fmt: on
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = x.shape
SCREAMING_SNAKE_CASE : Any = x.reshape(lowerCamelCase_ , 4 , in_channel // 4 )
SCREAMING_SNAKE_CASE : Any = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = x.shape
SCREAMING_SNAKE_CASE : Dict = x.reshape(lowerCamelCase_ , in_channel // 4 , 4 )
SCREAMING_SNAKE_CASE : str = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = x.shape[0]
SCREAMING_SNAKE_CASE : List[str] = x.reshape(4 , in_channel // 4 )
SCREAMING_SNAKE_CASE : str = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = x.shape[0]
SCREAMING_SNAKE_CASE : Optional[int] = x.reshape(in_channel // 4 , 4 )
SCREAMING_SNAKE_CASE : str = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowerCamelCase_ )
return x
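def _demo_unfold_reorder():
    # Added illustration (not part of the conversion script) of the channel
    # permutation the helpers above perform: groups of four channels are
    # re-ordered between the mmseg and Hugging Face patch-merging layouts.
    # For a single output row of 8 input channels:
    x = torch.arange(8 ).reshape(1 , 8 )
    out = x.reshape(1 , 2 , 4 )[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(1 , 8 )
    assert out.tolist() == [[0, 4, 2, 6, 1, 5, 3, 7]]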
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
SCREAMING_SNAKE_CASE : List[str] = model_name_to_url[model_name]
SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location="""cpu""" , file_name=lowerCamelCase_ )[
"""state_dict"""
]
for name, param in state_dict.items():
print(lowerCamelCase_ , param.shape )
SCREAMING_SNAKE_CASE : Dict = get_upernet_config(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetForSemanticSegmentation(lowerCamelCase_ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(lowerCamelCase_ )
if "bn" in key:
SCREAMING_SNAKE_CASE : List[str] = key.replace("""bn""" , """batch_norm""" )
SCREAMING_SNAKE_CASE : Optional[Any] = val
# rename keys
SCREAMING_SNAKE_CASE : Union[str, Any] = create_rename_keys(lowerCamelCase_ )
for src, dest in rename_keys:
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
read_in_q_k_v(lowerCamelCase_ , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
SCREAMING_SNAKE_CASE : Tuple = reverse_correct_unfold_reduction_order(lowerCamelCase_ )
if "norm" in key:
SCREAMING_SNAKE_CASE : Optional[int] = reverse_correct_unfold_norm_order(lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
# verify on image
SCREAMING_SNAKE_CASE : Optional[int] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert("""RGB""" )
SCREAMING_SNAKE_CASE : Optional[int] = SegformerImageProcessor()
SCREAMING_SNAKE_CASE : str = processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = outputs.logits
print(logits.shape )
print("""First values of logits:""" , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] )
elif model_name == "upernet-swin-small":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] )
elif model_name == "upernet-swin-base":
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] )
elif model_name == "upernet-swin-large":
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase_ , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase_ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowerCamelCase_ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[f'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__UpperCAmelCase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
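    # Added usage note: once converted (and optionally pushed), the checkpoint
    # loads with the standard classes, following the repo-id pattern used in
    # push_to_hub above, e.g.
    #   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny")
    #   processor = SegformerImageProcessor()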
'''simple docstring'''
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray( rgb : np.ndarray ) -> np.ndarray:
    r , g , b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2_9_8_9 * r + 0.5_8_7_0 * g + 0.1_1_4_0 * b
def gray_to_binary( gray : np.ndarray ) -> np.ndarray:
    return (gray > 127) & (gray <= 255)
def dilation( image : np.ndarray , kernel : np.ndarray ) -> np.ndarray:
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
    return output
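def _demo_dilation():
    # Added illustration: a single centre pixel grows into a plus shape under
    # the cross-shaped structuring element also used in the main block below.
    img = np.zeros((3, 3) )
    img[1, 1] = 1
    cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]] )
    assert dilation(img , cross ).tolist() == [[0, 1, 0], [1, 1, 1], [0, 1, 0]]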
if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class UpperCamelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Dict[str, int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int = None , lowerCamelCase_ : int = None ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Any = pad_token_id
SCREAMING_SNAKE_CASE : List[Any] = max_length
SCREAMING_SNAKE_CASE : Optional[int] = vocab
SCREAMING_SNAKE_CASE : List[Any] = merges
SCREAMING_SNAKE_CASE : Tuple = BytePairTokenizer(lowerCamelCase_ , lowerCamelCase_ , sequence_length=lowerCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls : Any , lowerCamelCase_ : GPTaTokenizer , *lowerCamelCase_ : str , **lowerCamelCase_ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = [""" """.join(lowerCamelCase_ ) for m in tokenizer.bpe_ranks.keys()]
SCREAMING_SNAKE_CASE : List[str] = tokenizer.get_vocab()
return cls(lowerCamelCase_ , lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls : List[Any] , lowerCamelCase_ : Union[str, os.PathLike] , *lowerCamelCase_ : str , **lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = GPTaTokenizer.from_pretrained(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
return cls.from_tokenizer(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls : List[str] , lowerCamelCase_ : Tuple ):
'''simple docstring'''
return cls(**lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : int = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tf_tokenizer(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = tf.ones_like(lowerCamelCase_ )
if self.pad_token_id is not None:
# pad the tokens up to max length
SCREAMING_SNAKE_CASE : Optional[int] = max_length if max_length is not None else self.max_length
if max_length is not None:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = pad_model_inputs(
lowerCamelCase_ , max_seq_length=lowerCamelCase_ , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ):
A_ : Union[str, Any] = GPTaTokenizer
A_ : List[Any] = GPTaTokenizerFast
A_ : Dict = True
A_ : List[str] = {'add_prefix_space': True}
A_ : List[str] = False
def a (self : Any ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__snake_case = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
__snake_case = dict(zip(a__ , range(len(a__ ) ) ) )
__snake_case = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__snake_case = {'''unk_token''': '''<unk>'''}
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(a__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(a__ ) )
def a (self : List[str] , **a__ : List[Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **a__ )
def a (self : Dict , **a__ : Union[str, Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
def a (self : int , a__ : Optional[int] ):
"""simple docstring"""
__snake_case = '''lower newer'''
__snake_case = '''lower newer'''
return input_text, output_text
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__snake_case = '''lower newer'''
__snake_case = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__snake_case = tokenizer.tokenize(a__ , add_prefix_space=a__ )
self.assertListEqual(a__ , a__ )
__snake_case = tokens + [tokenizer.unk_token]
__snake_case = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def a (self : int ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__snake_case = self.get_tokenizer()
__snake_case = self.get_rust_tokenizer(add_prefix_space=a__ )
__snake_case = '''lower newer'''
# Testing tokenization
__snake_case = tokenizer.tokenize(a__ , add_prefix_space=a__ )
__snake_case = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids without special tokens
__snake_case = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ )
__snake_case = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids with special tokens
__snake_case = self.get_rust_tokenizer(add_prefix_space=a__ )
__snake_case = tokenizer.encode(a__ , add_prefix_space=a__ )
__snake_case = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
# Testing the unknown token
__snake_case = tokens + [rust_tokenizer.unk_token]
__snake_case = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def a (self : Tuple , *a__ : Optional[int] , **a__ : Optional[Any] ):
"""simple docstring"""
pass
def a (self : List[str] , a__ : Any=15 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__snake_case = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
# Simple input
__snake_case = '''This is a simple input'''
__snake_case = ['''This is a simple input 1''', '''This is a simple input 2''']
__snake_case = ('''This is a simple input''', '''This is a pair''')
__snake_case = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding='''max_length''' )
# Simple input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding='''max_length''' )
# Simple input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding='''max_length''' , )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding='''max_length''' )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding='''max_length''' )
# Pair input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding='''max_length''' , )
def a (self : List[str] ):
"""simple docstring"""
__snake_case = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
__snake_case = '''This is a simple input'''
__snake_case = ['''This is a simple input looooooooong''', '''This is a simple input''']
__snake_case = ('''This is a simple input''', '''This is a pair''')
__snake_case = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
__snake_case = tokenizer.pad_token_id
__snake_case = tokenizer(a__ , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
__snake_case = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors='''np''' )
__snake_case = tokenizer(*a__ , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
__snake_case = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = '''$$$'''
__snake_case = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=a__ , add_bos_token=a__ )
__snake_case = '''This is a simple input'''
__snake_case = ['''This is a simple input 1''', '''This is a simple input 2''']
__snake_case = tokenizer.bos_token_id
__snake_case = tokenizer(a__ )
__snake_case = tokenizer(a__ )
self.assertEqual(out_s.input_ids[0] , a__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__snake_case = tokenizer.decode(out_s.input_ids )
__snake_case = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , a__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def a (self : List[Any] ):
"""simple docstring"""
pass
def a (self : Dict ):
"""simple docstring"""
__snake_case = [self.get_tokenizer(do_lower_case=a__ , add_bos_token=a__ )]
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
__snake_case = '''Encode this.'''
__snake_case = '''This one too please.'''
__snake_case = tokenizer.encode(a__ , add_special_tokens=a__ )
encoded_sequence += tokenizer.encode(a__ , add_special_tokens=a__ )
__snake_case = tokenizer.encode_plus(
a__ , a__ , add_special_tokens=a__ , return_special_tokens_mask=a__ , )
__snake_case = encoded_sequence_dict['''input_ids''']
__snake_case = encoded_sequence_dict['''special_tokens_mask''']
self.assertEqual(len(a__ ) , len(a__ ) )
__snake_case = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(a__ )
]
__snake_case = [x for x in filtered_sequence if x is not None]
self.assertEqual(a__ , a__ )
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=a__ )
__snake_case = '''A photo of a cat'''
__snake_case = tokenizer.encode(
a__ , )
self.assertEqual(a__ , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('''test_opt''' )
__snake_case = AutoTokenizer.from_pretrained('''./test_opt''' )
__snake_case = tokenizer.encode(
a__ , )
self.assertEqual(a__ , [2, 250, 1345, 9, 10, 4758] )
def a (self : List[str] ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , use_slow=a__ )
__snake_case = '''A photo of a cat'''
__snake_case = tokenizer.encode(
a__ , )
# Same as above
self.assertEqual(a__ , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip('''This test is failing because of a bug in the fast tokenizer''' )
def a (self : Tuple ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=a__ )
__snake_case = '''bos'''
__snake_case = tokenizer.get_vocab()['''bos''']
__snake_case = '''A photo of a cat'''
__snake_case = tokenizer.encode(
a__ , )
# We changed the bos token
self.assertEqual(a__ , [3_1957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('''./tok''' )
__snake_case = AutoTokenizer.from_pretrained('''./tok''' )
self.assertTrue(tokenizer.is_fast )
__snake_case = tokenizer.encode(
a__ , )
self.assertEqual(a__ , [3_1957, 250, 1345, 9, 10, 4758] )
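def _demo_roundtrip():
    # Added sketch (network access assumed): GPT-2's byte-level BPE makes
    # encoding and decoding a lossless round trip, which is the property the
    # miniature vocab/merges fixtures above emulate.
    tok = AutoTokenizer.from_pretrained('''gpt2''' )
    ids = tok('''lower newer''' ).input_ids
    assert tok.decode(ids ) == '''lower newer'''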
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCamelCase_ : Optional[NestedDataStructureLike[PathLike]] = None , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Optional[Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = path_or_paths
SCREAMING_SNAKE_CASE : List[Any] = split if split or isinstance(lowerCamelCase_ , lowerCamelCase_ ) else """train"""
SCREAMING_SNAKE_CASE : List[Any] = features
SCREAMING_SNAKE_CASE : Union[str, Any] = cache_dir
SCREAMING_SNAKE_CASE : Dict = keep_in_memory
SCREAMING_SNAKE_CASE : Union[str, Any] = streaming
SCREAMING_SNAKE_CASE : Optional[int] = num_proc
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs
@abstractmethod
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
pass
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : str , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Tuple , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = features
SCREAMING_SNAKE_CASE : int = cache_dir
SCREAMING_SNAKE_CASE : Dict = keep_in_memory
SCREAMING_SNAKE_CASE : Tuple = streaming
SCREAMING_SNAKE_CASE : Union[str, Any] = num_proc
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs
@abstractmethod
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
pass
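# Added note: concrete readers (such as the Text reader elsewhere in this
# collection) follow the contract of the first ABC above -- shared options are
# stored in __init__ and the abstract hook (named `read` upstream) returns the
# materialised Dataset, e.g.
#
#     class MyReader(AbstractDatasetReader):   # hypothetical subclass
#         def read(self):
#             ...build and return a Dataset from self.path_or_paths...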
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 5_0_0_0_0
SMALL_TEST = 5_0_0_0
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def read(dataset ,length ):
    for i in range(length ):
        _ = dataset[i]
@get_duration
def read_batch(dataset ,length ,batch_size ):
    for i in range(0 ,len(dataset ) ,batch_size ):
        _ = dataset[i : i + batch_size]
@get_duration
def read_formatted(dataset ,type ,length ):
    with dataset.formatted_as(type=type ):
        for i in range(length ):
            _ = dataset[i]
@get_duration
def read_formatted_batch(dataset ,type ,length ,batch_size ):
    with dataset.formatted_as(type=type ):
        for i in range(0 ,length ,batch_size ):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"""length""": SMALL_TEST}),
        (read, {"""length""": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
        (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}),
        (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_000}),
        (read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
        (read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}),
        (read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}),
        (read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}),
        (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
        (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_000}),
    ]
    functions_shuffled = [
        (read, {"""length""": SMALL_TEST}),
        (read, {"""length""": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
        (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}),
        (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_000}),
        (read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
        (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
        (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("""generating dataset""" )
        features = datasets.Features(
            {"""list""": datasets.Sequence(datasets.Value("""float32""" ) ), """numbers""": datasets.Value("""float32""" )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir ,"""dataset.arrow""" ) ,features ,num_examples=SPEED_TEST_N_EXAMPLES ,seq_shapes={"""list""": (100,)} ,)
        print("""first set of iterations""" )
        for func, kwargs in functions:
            print(func.__name__ ,str(kwargs ) )
            times[func.__name__ + " " + " ".join(str(v ) for v in kwargs.values() )] = func(dataset ,**kwargs )
        print("""shuffling dataset""" )
        dataset = dataset.shuffle()
        print("""Second set of iterations (after shuffling)""" )
        for func, kwargs in functions_shuffled:
            print("""shuffled """ ,func.__name__ ,str(kwargs ) )
            times["""shuffled """ + func.__name__ + " " + " ".join(str(v ) for v in kwargs.values() )] = func(
                dataset ,**kwargs )
    with open(RESULTS_FILE_PATH ,"""wb""" ) as f:
        f.write(json.dumps(times ).encode("""utf-8""" ) )
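def _reference_get_duration(func):
    # Added stand-in (illustrative, not the actual `utils` implementation)
    # matching the contract the decorator must satisfy here: the wrapped call
    # returns its own wall-clock duration in seconds.
    import functools
    import time

    @functools.wraps(func )
    def wrapper(*args , **kwargs ):
        start = time.time()
        func(*args , **kwargs )
        return time.time() - start

    return wrapper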
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = """ylacombe/bark-small"""
SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : str = """en_speaker_1"""
SCREAMING_SNAKE_CASE : Optional[int] = """This is a test string"""
SCREAMING_SNAKE_CASE : Optional[int] = """speaker_embeddings_path.json"""
SCREAMING_SNAKE_CASE : List[Any] = """speaker_embeddings"""
def lowerCamelCase_ ( self : int , **lowerCamelCase_ : int ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[str] = BarkProcessor(tokenizer=lowerCamelCase_ )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
SCREAMING_SNAKE_CASE : int = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
SCREAMING_SNAKE_CASE : List[str] = 35
SCREAMING_SNAKE_CASE : List[Any] = 2
SCREAMING_SNAKE_CASE : int = 8
SCREAMING_SNAKE_CASE : Optional[int] = {
"""semantic_prompt""": np.ones(lowerCamelCase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
SCREAMING_SNAKE_CASE : Tuple = processor(text=self.input_string , voice_preset=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = processor(text=self.input_string , voice_preset=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
SCREAMING_SNAKE_CASE : Optional[Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Any = BarkProcessor(tokenizer=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=self.input_string )
SCREAMING_SNAKE_CASE : Tuple = tokenizer(
self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
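def _demo_bark_processor():
    # Added sketch (network access assumed; the `_demo_*` name is illustrative):
    # what the processor returns for a prompt plus a named speaker preset,
    # mirroring the tests above.
    processor = BarkProcessor.from_pretrained("""ylacombe/bark-small""" )
    inputs = processor("""This is a test string""" , voice_preset="""en_speaker_1""" )
    return inputs["""history_prompt"""]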
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"--original_config_file",
default=None,
type=str,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--scheduler_type",
default="pndm",
type=str,
help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
)
parser.add_argument(
"--pipeline_type",
default=None,
type=str,
help=(
"The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
". If `None` pipeline will be automatically inferred."
),
)
parser.add_argument(
"--image_size",
default=None,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--prediction_type",
default=None,
type=str,
help=(
"The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
parser.add_argument(
"--stable_unclip",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
)
parser.add_argument(
"--stable_unclip_prior",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
)
parser.add_argument(
"--clip_stats_path",
type=str,
help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
required=False,
)
parser.add_argument(
"--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
)
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--vae_path",
type=str,
default=None,
required=False,
help="Set to a path, hub id to an already converted vae to not convert it again.",
)
_snake_case = parser.parse_args()
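# Convert the original checkpoint into a diffusers pipeline using the parsed CLI options.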
_snake_case = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
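# Example invocation (illustrative; the script name and paths below are placeholders):
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned.ckpt --dump_path ./sd-diffusers --half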
| 26 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCAmelCase = logging.getLogger(__name__)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return (preds == labels).mean()
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
SCREAMING_SNAKE_CASE__ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def __A ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase_ )
# Set seed
set_seed(training_args.seed )
try:
SCREAMING_SNAKE_CASE : Dict = processors[data_args.task_name]()
SCREAMING_SNAKE_CASE : Optional[int] = processor.get_labels()
SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase_ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , )
# Get datasets
SCREAMING_SNAKE_CASE : Optional[Any] = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
SCREAMING_SNAKE_CASE : Dict = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
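# Accuracy over the argmax of the per-choice logits.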
def compute_metrics(lowerCamelCase_ ) -> Dict:
SCREAMING_SNAKE_CASE : str = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(lowerCamelCase_ , p.label_ids )}
# Data collator
SCREAMING_SNAKE_CASE : List[Any] = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
SCREAMING_SNAKE_CASE : Any = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , compute_metrics=lowerCamelCase_ , data_collator=lowerCamelCase_ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE : Optional[Any] = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
SCREAMING_SNAKE_CASE : Optional[Any] = trainer.evaluate()
SCREAMING_SNAKE_CASE : str = os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_master():
with open(lowerCamelCase_ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , lowerCamelCase_ , lowerCamelCase_ )
writer.write("""%s = %s\n""" % (key, value) )
results.update(lowerCamelCase_ )
return results
def __A ( lowerCamelCase_ ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
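# Example invocation (illustrative; the script name, task, and paths are placeholders):
#   python run_multiple_choice.py --task_name swag --model_name_or_path bert-base-uncased \
#       --data_dir ./data/swag --output_dir ./output --do_train --do_eval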
| 323 | 0 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
__lowercase : Tuple = sys.version_info >= (3, 10)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int=None , _SCREAMING_SNAKE_CASE : Optional[Any]=None ):
return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE )
@dataclass
class __UpperCamelCase :
A_ = 42
A_ = 42
A_ = 42
A_ = 42
@dataclass
class __UpperCamelCase :
A_ = 42
A_ = field(default="toto" , metadata={"help": "help message"} )
@dataclass
class __UpperCamelCase :
A_ = False
A_ = True
A_ = None
class __UpperCamelCase ( lowerCAmelCase_ ):
titi = "titi"
toto = "toto"
class __UpperCamelCase ( lowerCAmelCase_ ):
titi = "titi"
toto = "toto"
fourtytwo = 42
@dataclass
class __UpperCamelCase :
A_ = "toto"
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = BasicEnum(self.foo )
@dataclass
class __UpperCamelCase :
A_ = "toto"
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = MixedTypeEnum(self.foo )
@dataclass
class __UpperCamelCase :
A_ = None
A_ = field(default=lowerCAmelCase_ , metadata={"help": "help message"} )
A_ = None
A_ = list_field(default=[] )
A_ = list_field(default=[] )
@dataclass
class __UpperCamelCase :
A_ = list_field(default=[] )
A_ = list_field(default=[1, 2, 3] )
A_ = list_field(default=["Hallo", "Bonjour", "Hello"] )
A_ = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class __UpperCamelCase :
A_ = field()
A_ = field()
A_ = field()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = BasicEnum(self.required_enum )
@dataclass
class __UpperCamelCase :
A_ = 42
A_ = field()
A_ = None
A_ = field(default="toto" , metadata={"help": "help message"} )
A_ = list_field(default=["Hallo", "Bonjour", "Hello"] )
if is_python_no_less_than_3_10:
@dataclass
class __UpperCamelCase :
A_ = False
A_ = True
A_ = None
@dataclass
class __UpperCamelCase :
A_ = None
A_ = field(default=lowerCAmelCase_ , metadata={"help": "help message"} )
A_ = None
A_ = list_field(default=[] )
A_ = list_field(default=[] )
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self , __a , __a ):
'''simple docstring'''
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
__a : Dict = {k: v for k, v in vars(__a ).items() if k != 'container'}
__a : Tuple = {k: v for k, v in vars(__a ).items() if k != 'container'}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('choices' , __a ) and yy.get('choices' , __a ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](__a ) , yy['type'](__a ) )
del xx["type"], yy["type"]
self.assertEqual(__a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = HfArgumentParser(__a )
__a : List[str] = argparse.ArgumentParser()
expected.add_argument('--foo' , type=__a , required=__a )
expected.add_argument('--bar' , type=__a , required=__a )
expected.add_argument('--baz' , type=__a , required=__a )
expected.add_argument('--flag' , type=__a , default=__a , const=__a , nargs='?' )
self.argparsersEqual(__a , __a )
__a : Optional[int] = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
((__a) , ) : Any = parser.parse_args_into_dataclasses(__a , look_for_args_file=__a )
self.assertFalse(example.flag )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = HfArgumentParser(__a )
__a : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=__a )
expected.add_argument('--baz' , default='toto' , type=__a , help='help message' )
self.argparsersEqual(__a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = argparse.ArgumentParser()
expected.add_argument('--foo' , type=__a , default=__a , const=__a , nargs='?' )
expected.add_argument('--baz' , type=__a , default=__a , const=__a , nargs='?' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=__a , dest='baz' )
expected.add_argument('--opt' , type=__a , default=__a )
__a : str = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__a )
for dataclass_type in dataclass_types:
__a : Dict = HfArgumentParser(__a )
self.argparsersEqual(__a , __a )
__a : List[Any] = parser.parse_args([] )
self.assertEqual(__a , Namespace(foo=__a , baz=__a , opt=__a ) )
__a : Tuple = parser.parse_args(['--foo', '--no_baz'] )
self.assertEqual(__a , Namespace(foo=__a , baz=__a , opt=__a ) )
__a : List[Any] = parser.parse_args(['--foo', '--baz'] )
self.assertEqual(__a , Namespace(foo=__a , baz=__a , opt=__a ) )
__a : int = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] )
self.assertEqual(__a , Namespace(foo=__a , baz=__a , opt=__a ) )
__a : Optional[int] = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] )
self.assertEqual(__a , Namespace(foo=__a , baz=__a , opt=__a ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = HfArgumentParser(__a )
__a : str = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(__a , __a )
__a : Union[str, Any] = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
__a : Tuple = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
__a : List[str] = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
__a : Tuple = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
__a : Optional[int] = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
__a : Any = parser.parse_args_into_dataclasses(['--foo', '42'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __UpperCAmelCase ( self ):
'''simple docstring'''
@dataclass
class __UpperCamelCase :
A_ = "toto"
__a : Dict = HfArgumentParser(__a )
__a : List[str] = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(__a , __a )
__a : str = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
__a : Optional[int] = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
__a : Any = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = HfArgumentParser(__a )
__a : Any = argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=__a )
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=__a )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=__a )
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=__a )
self.argparsersEqual(__a , __a )
__a : Union[str, Any] = parser.parse_args([] )
self.assertEqual(
__a , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , )
__a : Tuple = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() )
self.assertEqual(__a , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = argparse.ArgumentParser()
expected.add_argument('--foo' , default=__a , type=__a )
expected.add_argument('--bar' , default=__a , type=__a , help='help message' )
expected.add_argument('--baz' , default=__a , type=__a )
expected.add_argument('--ces' , nargs='+' , default=[] , type=__a )
expected.add_argument('--des' , nargs='+' , default=[] , type=__a )
__a : Any = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__a )
for dataclass_type in dataclass_types:
__a : Tuple = HfArgumentParser(__a )
self.argparsersEqual(__a , __a )
__a : Tuple = parser.parse_args([] )
self.assertEqual(__a , Namespace(foo=__a , bar=__a , baz=__a , ces=[] , des=[] ) )
__a : Tuple = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() )
self.assertEqual(__a , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = HfArgumentParser(__a )
__a : str = argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=__a , required=__a )
expected.add_argument('--required_str' , type=__a , required=__a )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=__a , )
self.argparsersEqual(__a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = HfArgumentParser(__a )
__a : Optional[Any] = argparse.ArgumentParser()
expected.add_argument('--foo' , type=__a , required=__a )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=__a , )
expected.add_argument('--opt' , type=__a , default=__a )
expected.add_argument('--baz' , default='toto' , type=__a , help='help message' )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=__a )
self.argparsersEqual(__a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = HfArgumentParser(__a )
__a : Optional[Any] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
__a : Optional[Any] = parser.parse_dict(__a )[0]
__a : str = BasicExample(**__a )
self.assertEqual(__a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = HfArgumentParser(__a )
__a : str = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(__a , parser.parse_dict , __a , allow_extra_keys=__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = HfArgumentParser(__a )
__a : Optional[int] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__a : List[str] = os.path.join(__a , 'temp_json' )
os.mkdir(__a )
with open(temp_local_path + '.json' , 'w+' ) as f:
json.dump(__a , __a )
__a : Tuple = parser.parse_yaml_file(Path(temp_local_path + '.json' ) )[0]
__a : str = BasicExample(**__a )
self.assertEqual(__a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = HfArgumentParser(__a )
__a : Union[str, Any] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__a : List[str] = os.path.join(__a , 'temp_yaml' )
os.mkdir(__a )
with open(temp_local_path + '.yaml' , 'w+' ) as f:
yaml.dump(__a , __a )
__a : List[str] = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0]
__a : Dict = BasicExample(**__a )
self.assertEqual(__a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = HfArgumentParser(__a )
self.assertIsNotNone(__a )
| 27 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Any=("DownEncoderBlock2D",) , lowerCamelCase_ : List[Any]=(64,) , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : List[Any]="silu" , lowerCamelCase_ : Optional[int]=True , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[int] = layers_per_block
SCREAMING_SNAKE_CASE : int = torch.nn.Conv2d(
lowerCamelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList([] )
# down
SCREAMING_SNAKE_CASE : Tuple = block_out_channels[0]
for i, down_block_type in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Any = output_channel
SCREAMING_SNAKE_CASE : List[str] = block_out_channels[i]
SCREAMING_SNAKE_CASE : Union[str, Any] = i == len(lowerCamelCase_ ) - 1
SCREAMING_SNAKE_CASE : Optional[Any] = get_down_block(
lowerCamelCase_ , num_layers=self.layers_per_block , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
self.down_blocks.append(lowerCamelCase_ )
# mid
SCREAMING_SNAKE_CASE : Union[str, Any] = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
# out
SCREAMING_SNAKE_CASE : List[Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCamelCase_ , eps=1e-6 )
SCREAMING_SNAKE_CASE : List[Any] = nn.SiLU()
SCREAMING_SNAKE_CASE : Dict = 2 * out_channels if double_z else out_channels
SCREAMING_SNAKE_CASE : List[Any] = nn.Conv2d(block_out_channels[-1] , lowerCamelCase_ , 3 , padding=1 )
SCREAMING_SNAKE_CASE : Tuple = False
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = x
SCREAMING_SNAKE_CASE : int = self.conv_in(lowerCamelCase_ )
if self.training and self.gradient_checkpointing:
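# Gradient checkpointing: recompute block activations in the backward pass to save memory.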
def create_custom_forward(lowerCamelCase_ : List[Any] ):
def custom_forward(*lowerCamelCase_ : List[str] ):
return module(*lowerCamelCase_ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
else:
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCamelCase_ )
else:
# down
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : Tuple = down_block(lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : List[Any] = self.mid_block(lowerCamelCase_ )
# post-process
SCREAMING_SNAKE_CASE : Optional[Any] = self.conv_norm_out(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = self.conv_act(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self.conv_out(lowerCamelCase_ )
return sample
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : str=("UpDecoderBlock2D",) , lowerCamelCase_ : Union[str, Any]=(64,) , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : Dict="silu" , lowerCamelCase_ : Any="group" , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : int = layers_per_block
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Conv2d(
lowerCamelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Any = nn.ModuleList([] )
SCREAMING_SNAKE_CASE : str = in_channels if norm_type == """spatial""" else None
# mid
SCREAMING_SNAKE_CASE : Dict = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
# up
SCREAMING_SNAKE_CASE : Union[str, Any] = list(reversed(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Any = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = output_channel
SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i]
SCREAMING_SNAKE_CASE : List[str] = i == len(lowerCamelCase_ ) - 1
SCREAMING_SNAKE_CASE : List[Any] = get_up_block(
lowerCamelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , prev_output_channel=lowerCamelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , resnet_time_scale_shift=lowerCamelCase_ , )
self.up_blocks.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = output_channel
# out
if norm_type == "spatial":
SCREAMING_SNAKE_CASE : List[Any] = SpatialNorm(block_out_channels[0] , lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCamelCase_ , eps=1e-6 )
SCREAMING_SNAKE_CASE : Dict = nn.SiLU()
SCREAMING_SNAKE_CASE : str = nn.Conv2d(block_out_channels[0] , lowerCamelCase_ , 3 , padding=1 )
SCREAMING_SNAKE_CASE : Dict = False
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : str=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = z
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_in(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase_ : List[str] ):
def custom_forward(*lowerCamelCase_ : str ):
return module(*lowerCamelCase_ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
SCREAMING_SNAKE_CASE : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
else:
# middle
SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ )
else:
# middle
SCREAMING_SNAKE_CASE : Any = self.mid_block(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Any = up_block(lowerCamelCase_ , lowerCamelCase_ )
# post-process
if latent_embeds is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_norm_out(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_act(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.conv_out(lowerCamelCase_ )
return sample
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int=None , lowerCamelCase_ : Any="random" , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : List[Any]=True ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Tuple = n_e
SCREAMING_SNAKE_CASE : int = vq_embed_dim
SCREAMING_SNAKE_CASE : Tuple = beta
SCREAMING_SNAKE_CASE : Union[str, Any] = legacy
SCREAMING_SNAKE_CASE : int = nn.Embedding(self.n_e , self.vq_embed_dim )
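# Initialize the codebook uniformly in [-1/n_e, 1/n_e], the standard VQ-VAE initialization.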
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
SCREAMING_SNAKE_CASE : Optional[Any] = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
SCREAMING_SNAKE_CASE : Tuple = self.used.shape[0]
SCREAMING_SNAKE_CASE : Any = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
SCREAMING_SNAKE_CASE : Union[str, Any] = self.re_embed
SCREAMING_SNAKE_CASE : Any = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
SCREAMING_SNAKE_CASE : Optional[int] = n_e
SCREAMING_SNAKE_CASE : Any = sane_index_shape
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = inds.shape
assert len(lowerCamelCase_ ) > 1
SCREAMING_SNAKE_CASE : Tuple = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long()
SCREAMING_SNAKE_CASE : Union[str, Any] = match.argmax(-1 )
SCREAMING_SNAKE_CASE : Tuple = match.sum(2 ) < 1
if self.unknown_index == "random":
SCREAMING_SNAKE_CASE : Tuple = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
SCREAMING_SNAKE_CASE : Any = self.unknown_index
return new.reshape(lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = inds.shape
assert len(lowerCamelCase_ ) > 1
SCREAMING_SNAKE_CASE : str = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ )
if self.re_embed > self.used.shape[0]: # extra token
SCREAMING_SNAKE_CASE : List[Any] = 0 # simply set to zero
SCREAMING_SNAKE_CASE : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCamelCase_ )
return back.reshape(lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous()
SCREAMING_SNAKE_CASE : int = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
SCREAMING_SNAKE_CASE : Any = torch.argmin(torch.cdist(lowerCamelCase_ , self.embedding.weight ) , dim=1 )
SCREAMING_SNAKE_CASE : Tuple = self.embedding(lowerCamelCase_ ).view(z.shape )
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : List[str] = None
# compute loss for embedding
if not self.legacy:
SCREAMING_SNAKE_CASE : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
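# (straight-through estimator: the quantization step acts as the identity in the backward pass)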
SCREAMING_SNAKE_CASE : Tuple = z + (z_q - z).detach()
# reshape back to match original input shape
SCREAMING_SNAKE_CASE : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE : List[Any] = self.remap_to_used(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ):
'''simple docstring'''
if self.remap is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE : List[Any] = self.unmap_to_all(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
SCREAMING_SNAKE_CASE : str = self.embedding(lowerCamelCase_ )
if shape is not None:
SCREAMING_SNAKE_CASE : List[str] = z_q.view(lowerCamelCase_ )
# reshape back to match original input shape
SCREAMING_SNAKE_CASE : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int]=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = parameters
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = torch.chunk(lowerCamelCase_ , 2 , dim=1 )
SCREAMING_SNAKE_CASE : List[str] = torch.clamp(self.logvar , -30.0 , 20.0 )
SCREAMING_SNAKE_CASE : Dict = deterministic
SCREAMING_SNAKE_CASE : int = torch.exp(0.5 * self.logvar )
SCREAMING_SNAKE_CASE : Tuple = torch.exp(self.logvar )
if self.deterministic:
SCREAMING_SNAKE_CASE : List[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[torch.Generator] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = randn_tensor(
self.mean.shape , generator=lowerCamelCase_ , device=self.parameters.device , dtype=self.parameters.dtype )
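# Reparameterization trick: mean + std * eps keeps the sample differentiable w.r.t. the distribution parameters.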
SCREAMING_SNAKE_CASE : Optional[Any] = self.mean + self.std * sample
return x
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int=None ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=[1, 2, 3] ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
SCREAMING_SNAKE_CASE : List[Any] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.mean
| 323 | 0 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2 as cva
import numpy as np
# Parameters
_lowerCamelCase : int = (720, 1280) # Height, Width
_lowerCamelCase : List[str] = (0.4, 0.6) # if height or width is lower than this scale, drop it.
_lowerCamelCase : int = 1 / 100
_lowerCamelCase : Optional[Any] = ""
_lowerCamelCase : str = ""
_lowerCamelCase : str = ""
_lowerCamelCase : str = 250
def __lowerCamelCase ( ) -> None:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = get_dataset(A__ , A__ )
for index in range(A__ ):
UpperCamelCase = random.sample(range(len(A__ ) ) , 4 )
UpperCamelCase , UpperCamelCase , UpperCamelCase = update_image_and_anno(
A__ , A__ , A__ , A__ , A__ , filter_scale=A__ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
UpperCamelCase = random_chars(32 )
UpperCamelCase = path.split(os.sep )[-1].rsplit('.' , 1 )[0]
UpperCamelCase = F"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
cva.imwrite(F"""{file_root}.jpg""" , A__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
UpperCamelCase = []
for anno in new_annos:
UpperCamelCase = anno[3] - anno[1]
UpperCamelCase = anno[4] - anno[2]
UpperCamelCase = anno[1] + width / 2
UpperCamelCase = anno[2] + height / 2
UpperCamelCase = F"""{anno[0]} {x_center} {y_center} {width} {height}"""
annos_list.append(A__ )
with open(F"""{file_root}.txt""" , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def __lowerCamelCase ( A__ , A__ ) -> tuple[list, list]:
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = []
for label_file in glob.glob(os.path.join(A__ , '*.txt' ) ):
UpperCamelCase = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(A__ ) as in_file:
UpperCamelCase = in_file.readlines()
UpperCamelCase = os.path.join(A__ , F"""{label_name}.jpg""" )
UpperCamelCase = []
for obj_list in obj_lists:
UpperCamelCase = obj_list.rstrip('\n' ).split(' ' )
UpperCamelCase = float(obj[1] ) - float(obj[3] ) / 2
UpperCamelCase = float(obj[2] ) - float(obj[4] ) / 2
UpperCamelCase = float(obj[1] ) + float(obj[3] ) / 2
UpperCamelCase = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(A__ )
labels.append(A__ )
return img_paths, labels
def __lowerCamelCase ( A__ , A__ , A__ , A__ , A__ , A__ = 0.0 , ) -> tuple[list, list, str]:
"""simple docstring"""
UpperCamelCase = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
UpperCamelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
UpperCamelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
UpperCamelCase = int(scale_x * output_size[1] )
UpperCamelCase = int(scale_y * output_size[0] )
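# The four source images are resized into the quadrants defined by this random split point.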
UpperCamelCase = []
UpperCamelCase = []
for i, index in enumerate(A__ ):
UpperCamelCase = all_img_list[index]
path_list.append(A__ )
UpperCamelCase = all_annos[index]
UpperCamelCase = cva.imread(A__ )
if i == 0: # top-left
UpperCamelCase = cva.resize(A__ , (divid_point_x, divid_point_y) )
UpperCamelCase = img
for bbox in img_annos:
UpperCamelCase = bbox[1] * scale_x
UpperCamelCase = bbox[2] * scale_y
UpperCamelCase = bbox[3] * scale_x
UpperCamelCase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
UpperCamelCase = cva.resize(A__ , (output_size[1] - divid_point_x, divid_point_y) )
UpperCamelCase = img
for bbox in img_annos:
UpperCamelCase = scale_x + bbox[1] * (1 - scale_x)
UpperCamelCase = bbox[2] * scale_y
UpperCamelCase = scale_x + bbox[3] * (1 - scale_x)
UpperCamelCase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
UpperCamelCase = cva.resize(A__ , (divid_point_x, output_size[0] - divid_point_y) )
UpperCamelCase = img
for bbox in img_annos:
UpperCamelCase = bbox[1] * scale_x
UpperCamelCase = scale_y + bbox[2] * (1 - scale_y)
UpperCamelCase = bbox[3] * scale_x
UpperCamelCase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
UpperCamelCase = cva.resize(
A__ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
UpperCamelCase = img
for bbox in img_annos:
UpperCamelCase = scale_x + bbox[1] * (1 - scale_x)
UpperCamelCase = scale_y + bbox[2] * (1 - scale_y)
UpperCamelCase = scale_x + bbox[3] * (1 - scale_x)
UpperCamelCase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
UpperCamelCase = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def __lowerCamelCase ( A__ ) -> str:
"""simple docstring"""
assert number_char > 1, "The number of characters should be greater than 1"
UpperCamelCase = ascii_lowercase + digits
return "".join(random.choice(A__ ) for _ in range(A__ ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 28 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaxAutoencoderKL
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 4
SCREAMING_SNAKE_CASE : str = 3
SCREAMING_SNAKE_CASE : List[Any] = (32, 32)
SCREAMING_SNAKE_CASE : Tuple = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE : Any = jax.random.uniform(lowerCamelCase_ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_input
return init_dict, inputs_dict
| 323 | 0 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __UpperCAmelCase ( self , _UpperCamelCase ) -> int:
with open(_UpperCamelCase , encoding='utf-8' ) as input_file:
UpperCAmelCase_ : int = re.compile(r'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)' )
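# Negative lookahead: skip open() calls that pass an encoding or use a binary/explicit mode.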
UpperCAmelCase_ : Dict = input_file.read()
UpperCAmelCase_ : Union[str, Any] = regexp.search(_UpperCamelCase )
return match
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Union[str, Any]:
with open(_UpperCamelCase , encoding='utf-8' ) as input_file:
UpperCAmelCase_ : Tuple = re.compile(r'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()' , re.DOTALL )
UpperCAmelCase_ : str = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
UpperCAmelCase_ : Optional[int] = regexp.finditer(_UpperCamelCase )
UpperCAmelCase_ : List[str] = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : List[str] = Path('./datasets' )
UpperCAmelCase_ : List[Any] = list(dataset_paths.absolute().glob('**/*.py' ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(_UpperCamelCase ) ):
raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}" )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : Optional[int] = Path('./datasets' )
UpperCAmelCase_ : Optional[Any] = list(dataset_paths.absolute().glob('**/*.py' ) )
for dataset in dataset_files:
if self._no_print_statements(str(_UpperCamelCase ) ):
raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead." )
| 29 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase__ :
"""simple docstring"""
@staticmethod
def lowerCamelCase_ ( *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : List[str] ):
'''simple docstring'''
pass
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = hashlib.md5(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = DepthEstimationPipeline(model=lowerCamelCase_ , image_processor=lowerCamelCase_ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} , lowerCamelCase_ )
import datasets
SCREAMING_SNAKE_CASE : List[str] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
SCREAMING_SNAKE_CASE : Any = depth_estimator(
[
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
] )
self.assertEqual(
[
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
] , lowerCamelCase_ , )
@require_tf
@unittest.skip("""Depth estimation is not implemented in TF""" )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
@slow
@require_torch
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = """Intel/dpt-large"""
SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline("""depth-estimation""" , model=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
SCREAMING_SNAKE_CASE : str = hashimage(outputs["""depth"""] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.662 )
@require_torch
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.skipTest("""There is no hf-internal-testing tiny model for either GLPN or DPT""" )
| 323 | 0 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def a ( snake_case__: Dict[str, torch.Tensor] ):
'''simple docstring'''
lowercase_ = []
lowercase_ = []
lowercase_ = []
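# Build per-residue-type lookup tables between the compact 14-atom layout and the full 37-atom layout.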
for rt in rc.restypes:
lowercase_ = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
lowercase_ = {name: i for i, name in enumerate(snake_case__ )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
lowercase_ = torch.tensor(
snake_case__ , dtype=torch.int32 , device=protein['''aatype'''].device , )
lowercase_ = torch.tensor(
snake_case__ , dtype=torch.int32 , device=protein['''aatype'''].device , )
lowercase_ = torch.tensor(
snake_case__ , dtype=torch.float32 , device=protein['''aatype'''].device , )
lowercase_ = protein['''aatype'''].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
lowercase_ = restype_atomaa_to_atomaa[protein_aatype]
lowercase_ = restype_atomaa_mask[protein_aatype]
lowercase_ = residx_atomaa_mask
lowercase_ = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
lowercase_ = restype_atomaa_to_atomaa[protein_aatype]
lowercase_ = residx_atomaa_to_atomaa.long()
# create the corresponding mask
lowercase_ = torch.zeros([21, 37] , dtype=torch.float32 , device=protein['''aatype'''].device )
for restype, restype_letter in enumerate(rc.restypes ):
lowercase_ = rc.restype_atoa[restype_letter]
lowercase_ = rc.residue_atoms[restype_name]
for atom_name in atom_names:
lowercase_ = rc.atom_order[atom_name]
lowercase_ = 1
lowercase_ = restype_atomaa_mask[protein_aatype]
lowercase_ = residx_atomaa_mask
return protein
def a ( snake_case__: Dict[str, torch.Tensor] ):
'''simple docstring'''
lowercase_ = tree_map(lambda snake_case__ : torch.tensor(snake_case__ , device=batch['''aatype'''].device ) , snake_case__ , np.ndarray )
lowercase_ = tensor_tree_map(lambda snake_case__ : np.array(snake_case__ ) , make_atomaa_masks(snake_case__ ) )
return out
| 30 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=13 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Dict=2_24 , lowerCamelCase_ : List[Any]=30 , lowerCamelCase_ : Union[str, Any]=4_00 , lowerCamelCase_ : str=True , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 18}
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : Any = num_channels
SCREAMING_SNAKE_CASE : str = image_size
SCREAMING_SNAKE_CASE : Dict = min_resolution
SCREAMING_SNAKE_CASE : List[str] = max_resolution
SCREAMING_SNAKE_CASE : str = do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size
SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize
SCREAMING_SNAKE_CASE : List[Any] = image_mean
SCREAMING_SNAKE_CASE : str = image_std
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ViTImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = EfficientFormerImageProcessorTester(self )
@property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : List[str] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Tuple = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : Any = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
| 323 | 0 |
'''simple docstring'''
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : int , A : int ):
# we need a list not a string, so do something to change the type
_UpperCAmelCase : int = arr.split("," )
def _A ( self : List[Any] ):
_UpperCAmelCase : Optional[Any] = [int(self.array[0] )] * len(self.array )
_UpperCAmelCase : List[str] = [int(self.array[0] )] * len(self.array )
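# Kadane-style DP: sum_value[i] is the best subarray sum ending at i; rear[i] is the best seen so far.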
for i in range(1 , len(self.array ) ):
_UpperCAmelCase : Optional[int] = max(
int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
_UpperCAmelCase : List[str] = max(sum_value[i] , rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Any = input("""please input some numbers:""")
__SCREAMING_SNAKE_CASE : Optional[int] = SubArray(whole_array)
__SCREAMING_SNAKE_CASE : Any = array.solve_sub_array()
print(("""the results is:""", re))
| 31 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
__UpperCAmelCase = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
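
# Sketch of what the lazy pattern buys: attribute access triggers the real
# import on demand (PEP 562). Illustrative only; the actual machinery lives
# inside `_LazyModule`:
#
#     import importlib
#
#     def __getattr__(name):
#         for submodule, exports in _import_structure.items():
#             if name in exports:
#                 return getattr(importlib.import_module(f".{submodule}", __name__), name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")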
| 323 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
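
# Usage sketch (assuming the standard `transformers` config API this file
# mirrors; values are illustrative):
#
#     config = BeitConfig(image_size=384)
#     onnx_config = BeitOnnxConfig(config)
#     print(onnx_config.inputs)               # OrderedDict([("pixel_values", {0: "batch", ...})])
#     print(onnx_config.atol_for_validation)  # 0.0001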
| 32 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
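
# The shim above re-exports the moved classes and emits a single warning. A
# minimal sketch of the same pattern (names illustrative, not the diffusers
# internals):
#
#     import warnings
#
#     def deprecate(feature, version, message, standard_warn=True, stacklevel=2):
#         warnings.warn(f"{feature} (removed in {version}): {message}", FutureWarning, stacklevel=stacklevel)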
| 323 | 0 |
"""simple docstring"""
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
    is_apex_available,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probability for all 1D convolutional layers in the feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch["labels"] = labels
        return batch
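
# Toy illustration of the -100 masking above (values invented for clarity):
#
#     >>> import torch
#     >>> ids = torch.tensor([[5, 7, 0], [3, 0, 0]])    # 0 = pad id
#     >>> mask = torch.tensor([[1, 1, 0], [1, 0, 0]])
#     >>> ids.masked_fill(mask.ne(1), -100)
#     tensor([[   5,    7, -100],
#             [   3, -100, -100]])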
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f"[{''.join(data_args.chars_to_ignore)}]"

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=train_dataset.column_names,
    )
    vocab_test = train_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=eval_dataset.column_names,
    )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json",
        unk_token="[UNK]",
        pad_token="[PAD]",
        word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48_000, 16_000)

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn,
        remove_columns=train_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn,
        remove_columns=eval_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )

    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)
        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results
if __name__ == "__main__":
main()
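
# Typical invocation of the script above (all argument values illustrative):
#
#     python run_common_voice.py \
#         --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#         --dataset_config_name tr \
#         --output_dir ./wav2vec2-large-xlsr-turkish \
#         --do_train --do_eval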
| 33 |
'''simple docstring'''
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Invert the bit at `position` of `number`."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is 1."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
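
# Quick sanity checks for the helpers above (illustrative, in doctest style):
#
#     >>> set_bit(0b1101, 1)       # 0b1111
#     15
#     >>> clear_bit(0b1111, 2)     # 0b1011
#     11
#     >>> flip_bit(0b1010, 0)      # 0b1011
#     11
#     >>> is_bit_set(0b1010, 3)
#     True
#     >>> get_bit(0b1010, 0)
#     0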
| 323 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output
class UperNetFCNHead(nn.Module):
    def __init__(
        self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
    ) -> None:
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r"""
    Parameters:
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
    config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
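
# Usage sketch (assumes a backbone config is set on `UperNetConfig`, as in the
# transformers examples; shapes illustrative):
#
#     from transformers import UperNetConfig
#
#     config = UperNetConfig(num_labels=150)
#     model = UperNetForSemanticSegmentation(config)
#     out = model(pixel_values=torch.randn(1, 3, 512, 512))
#     print(out.logits.shape)  # (1, 150, 512, 512) -- logits upsampled to input size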
| 34 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
@tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)
@is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
@is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
@tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 323 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
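
# Quick arithmetic check of the derived properties above (illustrative):
#
#     >>> cfg = FalconConfig()   # hidden_size=4544, num_attention_heads=71
#     >>> cfg.head_dim           # 4544 // 71
#     64
#     >>> cfg.rotary             # rotary embeddings are used when alibi is off
#     True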
| 35 |
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)
    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
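
# Usage sketch (shapes illustrative; assumes CLIP-style prefix features whose
# last dimension matches the decoder's `prefix_inner_dim`):
#
#     decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=768)
#     features = torch.randn(2, 77, 768)
#     tokens, lengths = decoder.generate_captions(features, eos_token_id=50256, device="cpu")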
| 323 | 0 |
def solution(limit: int = 50_000_000) -> int:
    """
    Project Euler 87: count the numbers below `limit` expressible as the sum of
    a prime square, a prime cube, and a prime fourth power.
    """
    ret = set()
    # smallest cube + fourth power is 2**3 + 2**4 = 24, so the square is bounded by limit - 24
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
if __name__ == "__main__":
print(f'''{solution() = }''')
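    # Quick check against the Project Euler 87 statement, which lists exactly four
    # such numbers below fifty (28, 33, 47 and 49); e.g. 28 = 2**2 + 2**3 + 2**4.
    assert solution(50) == 4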
| 36 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class GitVisionConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "git_vision_model"

    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "git"

    def __init__(self, vision_config=None, vocab_size=30522, hidden_size=768, num_hidden_layers=6, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, tie_word_embeddings=False, bos_token_id=101, eos_token_id=102, num_image_with_embedding=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
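# Hedged usage sketch: the composed config round-trips through to_dict(), e.g.
#   config = GitConfig(vision_config={"image_size": 224, "patch_size": 16})
#   assert config.to_dict()["model_type"] == "git"
#   vision = GitVisionConfig.from_pretrained("microsoft/git-base")  # pulls out the nested vision_config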
| 323 | 0 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """simple docstring"""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        # next expansion of the continued fraction for sqrt(2)
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 37 |
'''simple docstring'''
from manim import *
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE : List[str] = Rectangle(height=0.25 , width=0.25 )
SCREAMING_SNAKE_CASE : Optional[int] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : List[Any] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : str = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Tuple = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : List[Any] = Text("""CPU""" , font_size=24 )
SCREAMING_SNAKE_CASE : Any = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Optional[Any] = Text("""GPU""" , font_size=24 )
SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : List[Any] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Text("""Model""" , font_size=24 )
SCREAMING_SNAKE_CASE : List[str] = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for i, rect in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = fill.copy().set_fill(lowerCamelCase_ , opacity=0.8 )
target.move_to(lowerCamelCase_ )
model_arr.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(lowerCamelCase_ )
self.add(*lowerCamelCase_ , *lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Tuple = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Tuple = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Optional[int] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Dict = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : List[Any] = Text("""Disk""" , font_size=24 )
SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
disk.move_to([-4, -1.25, 0] )
self.add(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE : Optional[Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(lowerCamelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = MarkupText(
f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = Square(0.3 )
input.set_fill(lowerCamelCase_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , lowerCamelCase_ , buff=0.5 )
self.play(Write(lowerCamelCase_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=lowerCamelCase_ , buff=0.02 )
self.play(MoveToTarget(lowerCamelCase_ ) )
self.play(FadeOut(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : int = Arrow(start=lowerCamelCase_ , end=lowerCamelCase_ , color=lowerCamelCase_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , lowerCamelCase_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
SCREAMING_SNAKE_CASE : Optional[int] = MarkupText(
f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ , run_time=3 ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.02}
self.play(
Write(lowerCamelCase_ ) , Circumscribe(model_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_cpu_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
SCREAMING_SNAKE_CASE : Optional[int] = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , lowerCamelCase_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
SCREAMING_SNAKE_CASE : Any = AnimationGroup(
FadeOut(lowerCamelCase_ , run_time=0.5 ) , MoveToTarget(lowerCamelCase_ , run_time=0.5 ) , FadeIn(lowerCamelCase_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(lowerCamelCase_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
SCREAMING_SNAKE_CASE : Optional[Any] = 0.7
self.play(
Circumscribe(model_arr[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_arr[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = a_c
SCREAMING_SNAKE_CASE : Optional[Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(lowerCamelCase_ ) , FadeOut(lowerCamelCase_ , run_time=0.5 ) , )
SCREAMING_SNAKE_CASE : int = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ , run_time=3 ) , MoveToTarget(lowerCamelCase_ ) )
self.wait()
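        # Rendering note (assumption: Manim Community Edition is installed): save this
        # file and run `manim -pql <file>.py <SceneClassName>`; the scene animates
        # weights moving between disk, CPU and GPU during offloaded inference on a
        # model too large for GPU memory.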
| 323 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _A ( self : Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _A ( self : Dict ):
UpperCamelCase :Tuple = 1
UpperCamelCase :Optional[Any] = 3
UpperCamelCase :Union[str, Any] = (32, 32)
UpperCamelCase :List[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowerCamelCase )
return image
@property
def _A ( self : Optional[Any] ):
torch.manual_seed(0 )
UpperCamelCase :str = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__lowerCamelCase , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def _A ( self : Optional[Any] ):
torch.manual_seed(0 )
UpperCamelCase :Dict = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def _A ( self : str ):
torch.manual_seed(0 )
UpperCamelCase :Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
return CLIPTextModel(__lowerCamelCase )
def _A ( self : Union[str, Any] ):
UpperCamelCase :List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase :Optional[Any] = self.dummy_cond_unet_upscale
UpperCamelCase :Optional[Any] = DDPMScheduler()
UpperCamelCase :Optional[int] = DDIMScheduler(prediction_type="""v_prediction""" )
UpperCamelCase :List[str] = self.dummy_vae
UpperCamelCase :Optional[Any] = self.dummy_text_encoder
UpperCamelCase :Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCamelCase :List[str] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase :int = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
UpperCamelCase :List[Any] = StableDiffusionUpscalePipeline(
unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=350 , )
UpperCamelCase :Any = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase :Optional[Any] = """A painting of a squirrel eating a burger"""
UpperCamelCase :Optional[Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
UpperCamelCase :List[str] = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
UpperCamelCase :Tuple = output.images
UpperCamelCase :Optional[Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
UpperCamelCase :Optional[int] = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=__lowerCamelCase , )[0]
UpperCamelCase :Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase :List[str] = image_from_tuple[0, -3:, -3:, -1]
UpperCamelCase :Dict = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
UpperCamelCase :str = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _A ( self : Union[str, Any] ):
UpperCamelCase :Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase :Optional[Any] = self.dummy_cond_unet_upscale
UpperCamelCase :Dict = DDPMScheduler()
UpperCamelCase :int = DDIMScheduler(prediction_type="""v_prediction""" )
UpperCamelCase :List[str] = self.dummy_vae
UpperCamelCase :Optional[int] = self.dummy_text_encoder
UpperCamelCase :str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCamelCase :List[str] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase :Tuple = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
UpperCamelCase :str = StableDiffusionUpscalePipeline(
unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=350 , )
UpperCamelCase :List[str] = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase :Union[str, Any] = """A painting of a squirrel eating a burger"""
UpperCamelCase :Dict = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
UpperCamelCase :List[str] = output.images
assert image.shape[0] == 2
UpperCamelCase :Any = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
UpperCamelCase :Dict = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
UpperCamelCase :Optional[Any] = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def _A ( self : Any ):
UpperCamelCase :Union[str, Any] = self.dummy_cond_unet_upscale
UpperCamelCase :Tuple = DDPMScheduler()
UpperCamelCase :str = DDIMScheduler(prediction_type="""v_prediction""" )
UpperCamelCase :int = self.dummy_vae
UpperCamelCase :Union[str, Any] = self.dummy_text_encoder
UpperCamelCase :List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCamelCase :int = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase :Tuple = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("""RGB""" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
UpperCamelCase :str = unet.half()
UpperCamelCase :Optional[int] = text_encoder.half()
# make sure here that pndm scheduler skips prk
UpperCamelCase :str = StableDiffusionUpscalePipeline(
unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=350 , )
UpperCamelCase :Tuple = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase :Optional[int] = """A painting of a squirrel eating a burger"""
UpperCamelCase :Tuple = torch.manual_seed(0 )
UpperCamelCase :List[Any] = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=2 , output_type="""np""" , ).images
UpperCamelCase :str = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _A ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self : str ):
UpperCamelCase :Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
UpperCamelCase :str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat.npy""" )
UpperCamelCase :List[Any] = """stabilityai/stable-diffusion-x4-upscaler"""
UpperCamelCase :List[str] = StableDiffusionUpscalePipeline.from_pretrained(__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
UpperCamelCase :Optional[int] = """a cat sitting on a park bench"""
UpperCamelCase :str = torch.manual_seed(0 )
UpperCamelCase :List[Any] = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , generator=__lowerCamelCase , output_type="""np""" , )
UpperCamelCase :int = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def _A ( self : Dict ):
UpperCamelCase :Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
UpperCamelCase :Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat_fp16.npy""" )
UpperCamelCase :Any = """stabilityai/stable-diffusion-x4-upscaler"""
UpperCamelCase :Union[str, Any] = StableDiffusionUpscalePipeline.from_pretrained(
__lowerCamelCase , torch_dtype=torch.floataa , )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
UpperCamelCase :Optional[int] = """a cat sitting on a park bench"""
UpperCamelCase :str = torch.manual_seed(0 )
UpperCamelCase :Tuple = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , generator=__lowerCamelCase , output_type="""np""" , )
UpperCamelCase :Any = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def _A ( self : str ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase :Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
UpperCamelCase :Any = """stabilityai/stable-diffusion-x4-upscaler"""
UpperCamelCase :Tuple = StableDiffusionUpscalePipeline.from_pretrained(
__lowerCamelCase , torch_dtype=torch.floataa , )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCamelCase :Optional[Any] = """a cat sitting on a park bench"""
UpperCamelCase :Any = torch.manual_seed(0 )
UpperCamelCase :Dict = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , output_type="""np""" , )
UpperCamelCase :List[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
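        # Hedged usage sketch mirroring the offload test above (the model id is real,
        # the rest is illustrative):
        #   pipe = StableDiffusionUpscalePipeline.from_pretrained(
        #       "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16)
        #   pipe.enable_sequential_cpu_offload()  # keeps peak VRAM under ~3 GB, as asserted
        #   upscaled = pipe(prompt="a cat", image=low_res_image).images[0]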
| 38 |
'''simple docstring'''
from __future__ import annotations
graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph:
    """simple docstring"""

    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        '''simple docstring'''
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        '''simple docstring'''
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        '''simple docstring'''
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f'''->{target_vertex}'''
if __name__ == "__main__":
    g = Graph(graph, """G""")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
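    # Expected demo output with source vertex "G": shortest_path("D") returns
    # "G->C->A->B->D", shortest_path("G") returns "G" itself, and the "Foo" query
    # raises ValueError because that vertex was never reached by the BFS.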
| 323 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "roformer"

    def __init__(self, vocab_size=50000, embedding_size=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1536, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, rotary_value=False, use_cache=True, **kwargs, ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        # note: this unconditional assignment overrides the multiple-choice branch above
        dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
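# Hedged usage sketch: inspecting the ONNX input spec defined above.
#   onnx_config = RoFormerOnnxConfig(RoFormerConfig(), task="default")
#   print(onnx_config.inputs)  # OrderedDict: input_ids / attention_mask / token_type_ids -> dynamic axes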
| 39 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
TPosition = tuple[int, int]
class Node:
    """simple docstring"""

    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: int, parent: Node | None, ) -> None:
        '''simple docstring'''
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        '''simple docstring'''
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        '''simple docstring'''
        return self.f_cost < other.f_cost


class AStar:
    """simple docstring"""

    def __init__(self, start: TPosition, goal: TPosition):
        '''simple docstring'''
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        '''simple docstring'''
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        '''simple docstring'''
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent, ) )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        '''simple docstring'''
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    """simple docstring"""

    def __init__(self, start: TPosition, goal: TPosition):
        '''simple docstring'''
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        '''simple docstring'''
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            # each direction aims at the other direction's frontier node
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        '''simple docstring'''
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f'''AStar execution time = {end_time:f} seconds''')
    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()  # run the search so the timing below measures it
    bd_end_time = time.time() - bd_start_time
    print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
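    # Note: setting HEURISTIC = 1 at the top of this file switches
    # calculate_heuristic() to Manhattan distance, which is admissible on this
    # 4-connected unit-cost grid; the default of 0 uses Euclidean distance.
    # The computed routes are available as `path` and `bd_path`, e.g. print(path).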
| 323 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _A ( unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : Tuple , __UpperCAmelCase : Any):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"]):
a : Dict = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(__UpperCAmelCase)
def __snake_case ( self : Optional[Any]):
a : Union[str, Any] = "sshleifer/tiny-gpt2"
a : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
a : Union[str, Any] = PyTorchBenchmark(__UpperCAmelCase)
a : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def __snake_case ( self : Optional[int]):
a : List[str] = "sgugger/tiny-distilbert-classification"
a : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , only_pretrain_model=__UpperCAmelCase , )
a : Optional[int] = PyTorchBenchmark(__UpperCAmelCase)
a : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def __snake_case ( self : Any):
a : List[str] = "sshleifer/tiny-gpt2"
a : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , torchscript=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
a : str = PyTorchBenchmark(__UpperCAmelCase)
a : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision")
def __snake_case ( self : str):
a : Any = "sshleifer/tiny-gpt2"
a : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , fpaa=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
a : Any = PyTorchBenchmark(__UpperCAmelCase)
a : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def __snake_case ( self : Tuple):
a : Any = "sshleifer/tiny-gpt2"
a : Any = AutoConfig.from_pretrained(__UpperCAmelCase)
# set architectures equal to `None`
a : List[Any] = None
a : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
a : List[Any] = PyTorchBenchmark(__UpperCAmelCase , configs=[config])
a : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def __snake_case ( self : List[str]):
a : Tuple = "sshleifer/tiny-gpt2"
a : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
a : Dict = PyTorchBenchmark(__UpperCAmelCase)
a : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision")
def __snake_case ( self : Optional[Any]):
a : Any = "sshleifer/tiny-gpt2"
a : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__UpperCAmelCase , multi_process=__UpperCAmelCase , )
a : str = PyTorchBenchmark(__UpperCAmelCase)
a : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def __snake_case ( self : int):
a : Optional[Any] = "sshleifer/tiny-gpt2"
a : str = AutoConfig.from_pretrained(__UpperCAmelCase)
a : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
a : int = PyTorchBenchmark(__UpperCAmelCase , configs=[config])
a : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def __snake_case ( self : Any):
a : str = "sshleifer/tinier_bart"
a : Union[str, Any] = AutoConfig.from_pretrained(__UpperCAmelCase)
a : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
a : List[str] = PyTorchBenchmark(__UpperCAmelCase , configs=[config])
a : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def __snake_case ( self : Optional[Any]):
a : Dict = "sshleifer/tiny-gpt2"
a : Dict = AutoConfig.from_pretrained(__UpperCAmelCase)
a : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
a : int = PyTorchBenchmark(__UpperCAmelCase , configs=[config])
a : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def __snake_case ( self : Optional[int]):
a : Any = "sshleifer/tinier_bart"
a : Optional[Any] = AutoConfig.from_pretrained(__UpperCAmelCase)
a : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
a : Dict = PyTorchBenchmark(__UpperCAmelCase , configs=[config])
a : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def __snake_case ( self : int):
a : List[str] = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
a : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , save_to_csv=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__UpperCAmelCase , "inf_time.csv") , train_memory_csv_file=os.path.join(__UpperCAmelCase , "train_mem.csv") , inference_memory_csv_file=os.path.join(__UpperCAmelCase , "inf_mem.csv") , train_time_csv_file=os.path.join(__UpperCAmelCase , "train_time.csv") , env_info_csv_file=os.path.join(__UpperCAmelCase , "env.csv") , multi_process=__UpperCAmelCase , )
a : List[str] = PyTorchBenchmark(__UpperCAmelCase)
benchmark.run()
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "inf_time.csv")).exists())
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "train_time.csv")).exists())
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "inf_mem.csv")).exists())
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "train_mem.csv")).exists())
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "env.csv")).exists())
def __snake_case ( self : int):
a : Union[str, Any] = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(__UpperCAmelCase : str):
self.assertTrue(hasattr(__UpperCAmelCase , "sequential"))
self.assertTrue(hasattr(__UpperCAmelCase , "cumulative"))
self.assertTrue(hasattr(__UpperCAmelCase , "current"))
self.assertTrue(hasattr(__UpperCAmelCase , "total"))
with tempfile.TemporaryDirectory() as tmp_dir:
a : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__UpperCAmelCase , "log.txt") , log_print=__UpperCAmelCase , trace_memory_line_by_line=__UpperCAmelCase , multi_process=__UpperCAmelCase , )
a : Optional[Any] = PyTorchBenchmark(__UpperCAmelCase)
a : int = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "log.txt")).exists())
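        # Minimal direct usage of the benchmarked API, mirroring the arguments the
        # tests above exercise:
        #   from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
        #   args = PyTorchBenchmarkArguments(models=["sshleifer/tiny-gpt2"], training=False,
        #                                    inference=True, batch_sizes=[1],
        #                                    sequence_lengths=[8], multi_process=False)
        #   results = PyTorchBenchmark(args).run()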
| 40 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class EfficientNetConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "efficientnet"

    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, dropout_rate: float = 0.5, drop_connect_rate: float = 0.2, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1e-5
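# Illustrative helper (an assumption modeled on the usual EfficientNet compound
# scaling recipe, not code from the original file): round a channel count after
# applying the width coefficient, keeping it a multiple of `depth_divisor`.
def _round_filters_example(config: EfficientNetConfig, num_channels: int) -> int:
    scaled = num_channels * config.width_coefficient
    divisor = config.depth_divisor
    new_dim = max(divisor, int(scaled + divisor / 2) // divisor * divisor)
    if new_dim < 0.9 * scaled:  # do not let rounding remove more than 10% of the channels
        new_dim += divisor
    return int(new_dim)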
| 323 | 0 |
'''simple docstring'''
import os
import sys
import unittest
_A : List[str] =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_A : Dict =os.path.join(git_repo_path, '''src''', '''transformers''')
_A : Dict ='''
{0} = None
'''
_A : Dict ='''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
'''
_A : List[str] ='''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
class _lowercase ( unittest.TestCase ):
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : int = find_backend(""" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")""" )
self.assertIsNone(UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = find_backend(""" if not is_tokenizers_available():""" )
self.assertEqual(UpperCamelCase__ , """tokenizers""" )
lowerCamelCase__ : Tuple = find_backend(""" if not is_tensorflow_text_available():""" )
self.assertEqual(UpperCamelCase__ , """tensorflow_text""" )
lowerCamelCase__ : Dict = find_backend(""" if not (is_sentencepiece_available() and is_tokenizers_available()):""" )
self.assertEqual(UpperCamelCase__ , """sentencepiece_and_tokenizers""" )
lowerCamelCase__ : Union[str, Any] = find_backend(
""" if not (is_sentencepiece_available() and is_tensorflow_text_available()):""" )
self.assertEqual(UpperCamelCase__ , """sentencepiece_and_tensorflow_text""" )
lowerCamelCase__ : str = find_backend(
""" if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):""" )
self.assertEqual(UpperCamelCase__ , """sentencepiece_and_tokenizers_and_vision""" )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Tuple = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("""torch""" , UpperCamelCase__ )
self.assertIn("""tensorflow_text""" , UpperCamelCase__ )
self.assertIn("""sentencepiece_and_tokenizers""" , UpperCamelCase__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""BertModel""" , objects["""torch"""] )
self.assertIn("""TFBertModel""" , objects["""tf"""] )
self.assertIn("""FlaxBertModel""" , objects["""flax"""] )
self.assertIn("""BertModel""" , objects["""torch"""] )
self.assertIn("""TFBertTokenizer""" , objects["""tensorflow_text"""] )
self.assertIn("""convert_slow_tokenizer""" , objects["""sentencepiece_and_tokenizers"""] )
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Tuple = create_dummy_object("""CONSTANT""" , """'torch'""" )
self.assertEqual(UpperCamelCase__ , """\nCONSTANT = None\n""" )
lowerCamelCase__ : List[str] = create_dummy_object("""function""" , """'torch'""" )
self.assertEqual(
UpperCamelCase__ , """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
lowerCamelCase__ : List[str] = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
"""
lowerCamelCase__ : int = create_dummy_object("""FakeClass""" , """'torch'""" )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : List[Any] = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
"""
lowerCamelCase__ : Any = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""] , UpperCamelCase__ )
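# Illustration of the convention probed above (hypothetical input, hedged):
#   find_backend("    if not (is_torch_available() and is_vision_available()):")
# would be expected to return "torch_and_vision", i.e. compound availability
# guards are joined with "_and_".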
| 41 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        super().__init__(*args, **kwargs)
        requires_backends(self, """vision""")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        '''simple docstring'''
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
                    """ please use only one""" )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        '''simple docstring'''
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        '''simple docstring'''
        image = load_image(image)
        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f'''Received an invalid text input, got - {type(prompt)} - but expected a single string. '''
                    """Note also that one single text can be provided for conditional image to text generation.""" )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"""input_ids""": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f'''Model type {model_type} does not support conditional text generation''')
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        '''simple docstring'''
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["""input_ids"""], list)
            and all(x is None for x in model_inputs["""input_ids"""])
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        '''simple docstring'''
        records = []
        for output_ids in model_outputs:
            record = {
                """generated_text""": self.tokenizer.decode(
                    output_ids, skip_special_tokens=True, )
            }
            records.append(record)
        return records
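# Hedged usage sketch (the checkpoint is a real GIT model; the output shape shown
# follows the record format built in postprocess() above):
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="microsoft/git-base")
#   captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
#   # -> [{"generated_text": "..."}]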
| 323 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
    def tearDown( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
_snake_case = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
sd_pipe.set_scheduler('sample_euler' )
_snake_case = 'A painting of a squirrel eating a burger'
_snake_case = torch.manual_seed(0 )
_snake_case = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
_snake_case = output.images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_snake_case = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
_snake_case = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
sd_pipe.set_scheduler('sample_euler' )
_snake_case = 'A painting of a squirrel eating a burger'
_snake_case = torch.manual_seed(0 )
_snake_case = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
_snake_case = output.images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_snake_case = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
_snake_case = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
sd_pipe.set_scheduler('sample_dpmpp_2m' )
_snake_case = 'A painting of a squirrel eating a burger'
_snake_case = torch.manual_seed(0 )
_snake_case = sd_pipe(
[prompt] , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=15 , output_type='np' , use_karras_sigmas=lowerCAmelCase_ , )
_snake_case = output.images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_snake_case = np.array(
[0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
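# Hedged usage sketch distilled from the tests above; requires the `k-diffusion`
# package, and the prompt, step count, and sampler name are illustrative.
#
#   import torch
#   from diffusers import StableDiffusionKDiffusionPipeline
#
#   pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
#   pipe.set_scheduler("sample_dpmpp_2m")  # any sampler name exposed by k-diffusion
#   image = pipe("A painting of a squirrel eating a burger",
#                generator=torch.manual_seed(0), num_inference_steps=15,
#                use_karras_sigmas=True).images[0]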
| 42 |
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **kwargs ):
        '''simple docstring'''
        config = {
            """num_train_timesteps""": 2_01,
            """sigma_min""": 0.002,
            """sigma_max""": 80.0,
        }
        config.update(**kwargs )
        return config
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = 10
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0](**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = scheduler.timesteps[0]
SCREAMING_SNAKE_CASE : Dict = scheduler.timesteps[1]
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample
SCREAMING_SNAKE_CASE : List[str] = 0.1 * sample
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = 1
scheduler.set_timesteps(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = scheduler.timesteps
SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(lowerCamelCase_ ):
# 1. scale model input
SCREAMING_SNAKE_CASE : Optional[int] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict noise residual
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , lowerCamelCase_ )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE : List[str] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = pred_prev_sample
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 192.7_614 ) < 1e-2
assert abs(result_mean.item() - 0.2_510 ) < 1e-3
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = [1_06, 0]
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = scheduler.timesteps
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict noise residual
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , lowerCamelCase_ )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE : str = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Dict = pred_prev_sample
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 347.6_357 ) < 1e-2
assert abs(result_mean.item() - 0.4_527 ) < 1e-3
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [39, 30, 12, 15, 0]
with self.assertRaises(lowerCamelCase_ , msg="""`timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = [39, 30, 12, 1, 0]
SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ )
with self.assertRaises(lowerCamelCase_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=lowerCamelCase_ , timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Any = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            lowerCamelCase_ , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
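# Hedged sketch of the three-step sampling loop the tests above exercise; the
# zero-returning lambda is a stand-in for a real consistency-model UNet (an
# assumption here), so the produced sample is only structurally meaningful.
if __name__ == "__main__":
    scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
    scheduler.set_timesteps(10)
    generator = torch.manual_seed(0)
    model = lambda x, t: torch.zeros_like(x)  # stand-in predictor
    sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        scaled = scheduler.scale_model_input(sample, t)  # 1. scale model input
        model_output = model(scaled, t)                  # 2. predict noise residual
        sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample  # 3. step
    print(sample.shape)  # torch.Size([1, 3, 32, 32])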
| 323 | 0 |
from random import randint, random
def construct_highway( number_of_cells , frequency , initial_speed , random_speed = False , random_frequency = False , max_speed = 5 , ):
    '''simple docstring'''
    highway = [[-1] * number_of_cells]  # Create a highway without any cars
    i = 0
    initial_speed = max(initial_speed , 0 )
    while i < number_of_cells:
        highway[0][i] = (
            randint(0 , max_speed ) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1 , max_speed * 2 ) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway
def get_distance( highway_now , car_index ):
    '''simple docstring'''
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells ) ):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # The car is near the end of the highway: wrap around and keep counting from the start
    return distance + get_distance(highway_now , -1 )
def update( highway_now , probability , max_speed ):
    '''simple docstring'''
    number_of_cells = len(highway_now )
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells ):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1 , max_speed )
            # Number of empty cells before the next car
            dn = get_distance(highway_now , car_index ) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index] , dn )
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1 , 0 )
    return next_highway
def simulate( highway , number_of_update , probability , max_speed ):
    '''simple docstring'''
    number_of_cells = len(highway[0] )
    for i in range(number_of_update ):
        next_speeds_calculated = update(highway[i] , probability , max_speed )
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells ):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds )
    return highway
if __name__ == "__main__":
    import doctest
    doctest.testmod()
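    # Hedged usage sketch (parameter values are illustrative): build a highway and
    # print a few update cycles of the Nagel-Schreckenberg model implemented above.
    highway = construct_highway(25 , 4 , 1 )  # 25 cells, a car every 4 cells, initial speed 1
    for state in simulate(highway , 5 , 0.1 , 5 ):
        print("".join("." if cell == -1 else str(cell ) for cell in state ) )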
| 43 |
'''simple docstring'''
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader( AbstractDatasetReader ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : NestedDataStructureLike[PathLike] , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
lowerCamelCase_ , split=lowerCamelCase_ , features=lowerCamelCase_ , cache_dir=lowerCamelCase_ , keep_in_memory=lowerCamelCase_ , streaming=lowerCamelCase_ , num_proc=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : int = path_or_paths if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else {self.split: path_or_paths}
SCREAMING_SNAKE_CASE : Optional[int] = Text(
cache_dir=lowerCamelCase_ , data_files=lowerCamelCase_ , features=lowerCamelCase_ , **lowerCamelCase_ , )
    def read( self : Dict ):
'''simple docstring'''
if self.streaming:
SCREAMING_SNAKE_CASE : int = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : List[str] = None
self.builder.download_and_prepare(
download_config=lowerCamelCase_ , download_mode=lowerCamelCase_ , verification_mode=lowerCamelCase_ , base_path=lowerCamelCase_ , num_proc=self.num_proc , )
SCREAMING_SNAKE_CASE : int = self.builder.as_dataset(
split=self.split , verification_mode=lowerCamelCase_ , in_memory=self.keep_in_memory )
return dataset
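# Hedged usage sketch: this reader backs the "text" packaged module in `datasets`;
# end users normally reach it through load_dataset (the file name is illustrative).
#
#   from datasets import load_dataset
#
#   ds = load_dataset("text", data_files={"train": "my_corpus.txt"})
#   print(ds["train"][0])  # {'text': '<first line of the file>'}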
| 323 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester :
_UpperCamelCase : Any = XGLMConfig
_UpperCamelCase : List[Any] = {}
_UpperCamelCase : Optional[int] = "gelu"
def __init__( self , a__ , a__=14 , a__=7 , a__=True , a__=True , a__=True , a__=99 , a__=32 , a__=2 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=0.0_2 , ):
_lowerCAmelCase : Optional[int] = parent
_lowerCAmelCase : int = batch_size
_lowerCAmelCase : Optional[Any] = seq_length
_lowerCAmelCase : Any = is_training
_lowerCAmelCase : Optional[int] = use_input_mask
_lowerCAmelCase : str = use_labels
_lowerCAmelCase : Any = vocab_size
_lowerCAmelCase : Optional[int] = d_model
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Union[str, Any] = num_attention_heads
_lowerCAmelCase : Union[str, Any] = ffn_dim
_lowerCAmelCase : Any = activation_function
_lowerCAmelCase : Tuple = activation_dropout
_lowerCAmelCase : int = attention_dropout
_lowerCAmelCase : Optional[int] = max_position_embeddings
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : str = 2
_lowerCAmelCase : Optional[int] = 1
def __A ( self ):
return XGLMConfig.from_pretrained("""facebook/xglm-564M""" )
    def prepare_config_and_inputs( self ):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def get_config( self ):
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=a__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=a__ , )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, head_mask = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_tf
class TFXGLMModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
_UpperCamelCase : List[str] = (TFXGLMForCausalLM,) if is_tf_available() else ()
_UpperCamelCase : Optional[Any] = (
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
)
_UpperCamelCase : Any = False
_UpperCamelCase : int = False
_UpperCamelCase : Union[str, Any] = False
def __A ( self ):
_lowerCAmelCase : List[str] = TFXGLMModelTester(self )
_lowerCAmelCase : Optional[Any] = ConfigTester(self , config_class=a__ , n_embd=37 )
def __A ( self ):
self.config_tester.run_common_tests()
@slow
def __A ( self ):
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Any = TFXGLMModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" )
def __A ( self ):
super().test_resize_token_embeddings()
@require_tf
class __A ( unittest.TestCase ):
@slow
def __A ( self , a__=True ):
_lowerCAmelCase : List[str] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase : Tuple = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
_lowerCAmelCase : Any = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
# fmt: on
_lowerCAmelCase : str = model.generate(a__ , do_sample=a__ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , a__ )
@slow
def __A ( self ):
_lowerCAmelCase : str = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase : Tuple = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
tf.random.set_seed(0 )
_lowerCAmelCase : List[Any] = tokenizer("""Today is a nice day and""" , return_tensors="""tf""" )
_lowerCAmelCase : Optional[Any] = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and ensure the same output regardless of the available devices)
with tf.device(""":/CPU:0""" ):
_lowerCAmelCase : List[str] = model.generate(a__ , do_sample=a__ , seed=[7, 0] )
_lowerCAmelCase : int = tokenizer.decode(output_ids[0] , skip_special_tokens=a__ )
_lowerCAmelCase : Union[str, Any] = (
"""Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"""
)
self.assertEqual(a__ , a__ )
@slow
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase : List[Any] = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase : Optional[Any] = """left"""
# use different length sentences to test batching
_lowerCAmelCase : List[Any] = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When""",
"""Hello, my dog is a little""",
]
_lowerCAmelCase : int = tokenizer(a__ , return_tensors="""tf""" , padding=a__ )
_lowerCAmelCase : Optional[int] = inputs["""input_ids"""]
_lowerCAmelCase : List[Any] = model.generate(input_ids=a__ , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 )
_lowerCAmelCase : Any = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
_lowerCAmelCase : List[Any] = model.generate(input_ids=a__ , max_new_tokens=12 )
_lowerCAmelCase : str = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
_lowerCAmelCase : Optional[Any] = model.generate(input_ids=a__ , max_new_tokens=12 )
_lowerCAmelCase : Any = tokenizer.batch_decode(a__ , skip_special_tokens=a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=a__ )
_lowerCAmelCase : Optional[int] = tokenizer.decode(output_padded[0] , skip_special_tokens=a__ )
_lowerCAmelCase : Dict = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """
"""a single""",
"""Hello, my dog is a little bit of a shy one, but he is very friendly""",
]
self.assertListEqual(a__ , a__ )
self.assertListEqual(a__ , [non_padded_sentence, padded_sentence] )
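# Hedged sketch of the batching rule the last test exercises: decoder-only LMs need
# left padding for batched generate(), otherwise short prompts would continue from
# pad tokens rather than from their own final token.
#
#   tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#   tokenizer.padding_side = "left"
#   batch = tokenizer(["a much longer first prompt", "short"], padding=True, return_tensors="tf")
#   # batch["input_ids"][1] starts with pad ids; batch["attention_mask"] masks them out.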
| 44 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config( model_name ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = 3_84
SCREAMING_SNAKE_CASE : Union[str, Any] = 7
if "tiny" in model_name:
SCREAMING_SNAKE_CASE : List[str] = 96
SCREAMING_SNAKE_CASE : List[str] = (2, 2, 6, 2)
SCREAMING_SNAKE_CASE : List[Any] = (3, 6, 12, 24)
elif "small" in model_name:
SCREAMING_SNAKE_CASE : Any = 96
SCREAMING_SNAKE_CASE : List[str] = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : int = (3, 6, 12, 24)
elif "base" in model_name:
SCREAMING_SNAKE_CASE : int = 1_28
SCREAMING_SNAKE_CASE : Any = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : int = (4, 8, 16, 32)
SCREAMING_SNAKE_CASE : Optional[Any] = 12
SCREAMING_SNAKE_CASE : str = 5_12
elif "large" in model_name:
SCREAMING_SNAKE_CASE : Tuple = 1_92
SCREAMING_SNAKE_CASE : Tuple = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : List[str] = (6, 12, 24, 48)
SCREAMING_SNAKE_CASE : Tuple = 12
SCREAMING_SNAKE_CASE : Union[str, Any] = 7_68
# set label information
SCREAMING_SNAKE_CASE : List[str] = 1_50
SCREAMING_SNAKE_CASE : Optional[Any] = """huggingface/label-files"""
SCREAMING_SNAKE_CASE : List[str] = """ade20k-id2label.json"""
SCREAMING_SNAKE_CASE : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) )
SCREAMING_SNAKE_CASE : str = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Optional[Any] = SwinConfig(
embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , num_heads=lowerCamelCase_ , window_size=lowerCamelCase_ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
SCREAMING_SNAKE_CASE : List[str] = UperNetConfig(
backbone_config=lowerCamelCase_ , auxiliary_in_channels=lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , )
return config
def create_rename_keys( config ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def read_in_q_k_v( state_dict , backbone_config ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
SCREAMING_SNAKE_CASE : Dict = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE : int = in_proj_weight[:dim, :]
SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[: dim]
SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE : Any = in_proj_bias[
dim : dim * 2
]
SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[
-dim :, :
]
SCREAMING_SNAKE_CASE : str = in_proj_bias[-dim :]
# fmt: on
def correct_unfold_reduction_order( x ):
    """simple docstring"""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel , 4 , in_channel // 4 )
    x = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def reverse_correct_unfold_reduction_order( x ):
    """simple docstring"""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel , in_channel // 4 , 4 )
    x = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def correct_unfold_norm_order( x ):
    """simple docstring"""
    in_channel = x.shape[0]
    x = x.reshape(4 , in_channel // 4 )
    x = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(in_channel )
    return x
def reverse_correct_unfold_norm_order( x ):
    """simple docstring"""
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4 , 4 )
    x = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(in_channel )
    return x
def convert_upernet_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
SCREAMING_SNAKE_CASE : List[str] = model_name_to_url[model_name]
SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location="""cpu""" , file_name=lowerCamelCase_ )[
"""state_dict"""
]
for name, param in state_dict.items():
print(lowerCamelCase_ , param.shape )
SCREAMING_SNAKE_CASE : Dict = get_upernet_config(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetForSemanticSegmentation(lowerCamelCase_ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(lowerCamelCase_ )
if "bn" in key:
SCREAMING_SNAKE_CASE : List[str] = key.replace("""bn""" , """batch_norm""" )
SCREAMING_SNAKE_CASE : Optional[Any] = val
# rename keys
SCREAMING_SNAKE_CASE : Union[str, Any] = create_rename_keys(lowerCamelCase_ )
for src, dest in rename_keys:
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
read_in_q_k_v(lowerCamelCase_ , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
SCREAMING_SNAKE_CASE : Tuple = reverse_correct_unfold_reduction_order(lowerCamelCase_ )
if "norm" in key:
SCREAMING_SNAKE_CASE : Optional[int] = reverse_correct_unfold_norm_order(lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
# verify on image
SCREAMING_SNAKE_CASE : Optional[int] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert("""RGB""" )
SCREAMING_SNAKE_CASE : Optional[int] = SegformerImageProcessor()
SCREAMING_SNAKE_CASE : str = processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = outputs.logits
print(logits.shape )
print("""First values of logits:""" , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] )
elif model_name == "upernet-swin-small":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] )
elif model_name == "upernet-swin-base":
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] )
elif model_name == "upernet-swin-large":
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase_ , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase_ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowerCamelCase_ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[f'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__UpperCAmelCase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
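# Hedged CLI sketch (the three flags are exactly the ones registered above; the
# script file name is illustrative):
#
#   python convert_upernet_swin_to_pytorch.py \
#       --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny \
#       --push_to_hub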
| 323 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput( BaseOutput ):
    '''simple docstring'''
    latents: torch.FloatTensor
class VQModel( ModelMixin , ConfigMixin ):
'''simple docstring'''
@register_to_config
def __init__( self , _a = 3 , _a = 3 , _a = ("DownEncoderBlock2D",) , _a = ("UpDecoderBlock2D",) , _a = (64,) , _a = 1 , _a = "silu" , _a = 3 , _a = 32 , _a = 256 , _a = 32 , _a = None , _a = 0.1_8215 , _a = "group" , ):
super().__init__()
# pass init params to Encoder
__a = Encoder(
in_channels=_a , out_channels=_a , down_block_types=_a , block_out_channels=_a , layers_per_block=_a , act_fn=_a , norm_num_groups=_a , double_z=_a , )
__a = vq_embed_dim if vq_embed_dim is not None else latent_channels
__a = nn.Convad(_a , _a , 1 )
__a = VectorQuantizer(_a , _a , beta=0.25 , remap=_a , sane_index_shape=_a )
__a = nn.Convad(_a , _a , 1 )
# pass init params to Decoder
__a = Decoder(
in_channels=_a , out_channels=_a , up_block_types=_a , block_out_channels=_a , layers_per_block=_a , act_fn=_a , norm_num_groups=_a , norm_type=_a , )
@apply_forward_hook
    def encode( self , x , return_dict = True ):
        h = self.encoder(x )
        h = self.quant_conv(h )
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h )
@apply_forward_hook
    def decode( self , h , force_not_quantize = False , return_dict = True ):
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h )
        else:
            quant = h
        quant2 = self.post_quant_conv(quant )
        dec = self.decoder(quant2 , quant if self.config.norm_type == '''spatial''' else None )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward( self , sample , return_dict = True ):
        x = sample
        h = self.encode(x ).latents
        dec = self.decode(h ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
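# Hedged round-trip sketch for the class above (upstream this is diffusers' VQModel);
# the default config keeps the 32x32 spatial size through a single encoder/decoder
# block, and decode() quantizes before decoding. Shapes are illustrative.
#
#   import torch
#   from diffusers import VQModel
#
#   model = VQModel()
#   x = torch.randn(1, 3, 32, 32)
#   latents = model.encode(x).latents
#   recon = model.decode(latents).sample
#   print(recon.shape)  # torch.Size([1, 3, 32, 32])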
| 45 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class TFGPTaTokenizer(tf.keras.layers.Layer ):
    """simple docstring"""
    def __init__( self , vocab : Dict[str, int] , merges : List[str] , max_length : int = None , pad_token_id : int = None ):
        '''simple docstring'''
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab , merges , sequence_length=max_length )
    @classmethod
    def from_tokenizer( cls , tokenizer : GPTaTokenizer , *args , **kwargs ):
        '''simple docstring'''
        merges = [""" """.join(m ) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab , merges , *args , **kwargs )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , *init_inputs , **kwargs ):
        '''simple docstring'''
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path , *init_inputs , **kwargs )
        return cls.from_tokenizer(tokenizer , *init_inputs , **kwargs )
    @classmethod
    def from_config( cls , config ):
        '''simple docstring'''
        return cls(**config )
    def get_config( self ):
        '''simple docstring'''
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }
    def call( self , x , max_length : int = None ):
        '''simple docstring'''
        input_ids = self.tf_tokenizer(x )
        attention_mask = tf.ones_like(input_ids )
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids , max_seq_length=max_length , pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 323 | 0 |
"""simple docstring"""
def combination_util( arr , n , r , index , data , i ):
    '''simple docstring'''
    if index == r:
        for j in range(r ):
            print(data[j] , end=""" """ )
        print(""" """ )
        return
    # When no more elements are left to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr , n , r , index + 1 , data , i + 1 )
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr , n , r , index , data , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination( arr , n , r ):
    '''simple docstring'''
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr , n , r , 0 , data , 0 )
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
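# Hedged cross-check: itertools yields the same ten size-3 subsets the routine above
# prints for this input, in the same lexicographic order.
#
#   from itertools import combinations
#   assert [list(c) for c in combinations([10, 20, 30, 40, 50], 3)] == [
#       [10, 20, 30], [10, 20, 40], [10, 20, 50], [10, 30, 40], [10, 30, 50],
#       [10, 40, 50], [20, 30, 40], [20, 30, 50], [20, 40, 50], [30, 40, 50],
#   ]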
| 46 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader( ABC ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCamelCase_ : Optional[NestedDataStructureLike[PathLike]] = None , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Optional[Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = path_or_paths
SCREAMING_SNAKE_CASE : List[Any] = split if split or isinstance(lowerCamelCase_ , lowerCamelCase_ ) else """train"""
SCREAMING_SNAKE_CASE : List[Any] = features
SCREAMING_SNAKE_CASE : Union[str, Any] = cache_dir
SCREAMING_SNAKE_CASE : Dict = keep_in_memory
SCREAMING_SNAKE_CASE : Union[str, Any] = streaming
SCREAMING_SNAKE_CASE : Optional[int] = num_proc
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs
@abstractmethod
    def read( self : Any ):
'''simple docstring'''
pass
class AbstractDatasetInputStream( ABC ):
"""simple docstring"""
def __init__( self : str , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Tuple , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = features
SCREAMING_SNAKE_CASE : int = cache_dir
SCREAMING_SNAKE_CASE : Dict = keep_in_memory
SCREAMING_SNAKE_CASE : Tuple = streaming
SCREAMING_SNAKE_CASE : Union[str, Any] = num_proc
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs
@abstractmethod
    def read( self : Dict ):
'''simple docstring'''
pass
| 323 | 0 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer( ModelMixin , ConfigMixin ):
@register_to_config
    def __init__( self , embedding_dim : int = 768 , ):
        '''simple docstring'''
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 , embedding_dim ) )
        self.std = nn.Parameter(torch.ones(1 , embedding_dim ) )
    def to( self , torch_device : Optional[Union[str, torch.device]] = None , torch_dtype : Optional[torch.dtype] = None , ):
        '''simple docstring'''
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self
    def scale( self , embeds ):
        '''simple docstring'''
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds
    def unscale( self , embeds ):
        '''simple docstring'''
        embeds = (embeds * self.std) + self.mean
        return embeds
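# Hedged round-trip sketch: scale() standardizes embeddings with the learned
# mean/std and unscale() inverts it exactly (the shape is illustrative).
#
#   norm = StableUnCLIPImageNormalizer(embedding_dim=768)
#   x = torch.randn(2, 768)
#   assert torch.allclose(norm.unscale(norm.scale(x)), x, atol=1e-5)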
| 47 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self : int ):
        '''simple docstring'''
        self.checkpoint = """ylacombe/bark-small"""
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = """en_speaker_1"""
        self.input_string = """This is a test string"""
        self.speaker_embeddings_dict_path = """speaker_embeddings_path.json"""
        self.speaker_embeddings_directory = """speaker_embeddings"""
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return AutoTokenizer.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self : Union[str, Any] ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[str] = BarkProcessor(tokenizer=lowerCamelCase_ )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
SCREAMING_SNAKE_CASE : int = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
SCREAMING_SNAKE_CASE : List[str] = 35
SCREAMING_SNAKE_CASE : List[Any] = 2
SCREAMING_SNAKE_CASE : int = 8
SCREAMING_SNAKE_CASE : Optional[int] = {
"""semantic_prompt""": np.ones(lowerCamelCase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
SCREAMING_SNAKE_CASE : Tuple = processor(text=self.input_string , voice_preset=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = processor(text=self.input_string , voice_preset=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
SCREAMING_SNAKE_CASE : Optional[Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Any = BarkProcessor(tokenizer=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=self.input_string )
SCREAMING_SNAKE_CASE : Tuple = tokenizer(
self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
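# Hedged usage sketch distilled from the tests above (checkpoint and preset names are
# the ones the tests use):
#
#   processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
#   inputs = processor(text="This is a test string", voice_preset="en_speaker_1")
#   # inputs carries the tokenized text plus a "history_prompt" built from the voice preset.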
| 323 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : List[Any] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
    model_type = """megatron-bert"""
    def __init__( self , vocab_size=2_9056 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        lowerCamelCase : List[str]
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
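# Hedged sketch: instantiating the config with its defaults (upstream class name
# MegatronBertConfig; the placeholder class name above mirrors it).
#
#   from transformers import MegatronBertConfig
#
#   config = MegatronBertConfig()   # 29056-token vocab, 24 layers, 16 heads, hidden size 1024
#   print(config.model_type)        # "megatron-bert"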
| 48 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCAmelCase = logging.getLogger(__name__)
def simple_accuracy( preds , labels ):
    """simple docstring"""
    return (preds == labels).mean()
@dataclass
class ModelArguments :
    """simple docstring"""
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class DataTrainingArguments :
    """simple docstring"""
    task_name: str = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
    data_dir: str = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
    max_seq_length: int = field(
        default=128 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def main():
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase_ )
# Set seed
set_seed(training_args.seed )
try:
SCREAMING_SNAKE_CASE : Dict = processors[data_args.task_name]()
SCREAMING_SNAKE_CASE : Optional[int] = processor.get_labels()
SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase_ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , )
# Get datasets
SCREAMING_SNAKE_CASE : Optional[Any] = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
SCREAMING_SNAKE_CASE : Dict = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
    def compute_metrics(p : EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
# Data collator
SCREAMING_SNAKE_CASE : List[Any] = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
SCREAMING_SNAKE_CASE : Any = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , compute_metrics=lowerCamelCase_ , data_collator=lowerCamelCase_ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE : Optional[Any] = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
SCREAMING_SNAKE_CASE : Optional[Any] = trainer.evaluate()
SCREAMING_SNAKE_CASE : str = os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_master():
with open(lowerCamelCase_ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , lowerCamelCase_ , lowerCamelCase_ )
writer.write("""%s = %s\n""" % (key, value) )
results.update(lowerCamelCase_ )
return results
def _mp_fn( index ):
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
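# Hedged CLI sketch (flag names are generated by HfArgumentParser from the dataclasses
# above; the task name must be one of the keys of `processors`, and the script file
# name is illustrative):
#
#   python run_multiple_choice.py \
#       --model_name_or_path bert-base-uncased \
#       --task_name swag \
#       --data_dir ./data/swag \
#       --max_seq_length 128 \
#       --output_dir ./output --do_train --do_eval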
| 323 | 0 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
__snake_case :Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class _A ( Pipeline ):
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING )
    def _sanitize_parameters( self , padding=None , truncation=None , top_k=None , **kwargs ):
        '''simple docstring'''
        # returns (preprocess_params, forward_params, postprocess_params):
        # padding/truncation steer tokenization, top_k only affects postprocess
        preprocess_params , postprocess_params = {}, {}
        if padding is not None:
            preprocess_params['''padding'''] = padding
        if truncation is not None:
            preprocess_params['''truncation'''] = truncation
        if top_k is not None:
            postprocess_params['''top_k'''] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , image: Union["Image.Image", str] , question: str = None , **kwargs ):
        '''simple docstring'''
        if isinstance(image , (Image.Image, str)) and isinstance(question , str):
            inputs = {'''image''': image, '''question''': question}
        else:
            # supports passing a {"image": ..., "question": ...} dict (or a
            # dataset of such dicts) as the only positional argument
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results
    def preprocess( self , inputs , padding=False , truncation=False ):
        '''simple docstring'''
        image = load_image(inputs['''image'''] )
        model_inputs = self.tokenizer(
            inputs['''question'''] , return_tensors=self.framework , padding=padding , truncation=truncation )
        image_features = self.image_processor(images=image , return_tensors=self.framework )
        model_inputs.update(image_features )
        return model_inputs
    def _forward( self , model_inputs ):
        '''simple docstring'''
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        '''simple docstring'''
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores , ids = probs.topk(top_k )
        else:
            raise ValueError(F'Unsupported framework: {self.framework}')
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 49 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Any=("DownEncoderBlock2D",) , lowerCamelCase_ : List[Any]=(64,) , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : List[Any]="silu" , lowerCamelCase_ : Optional[int]=True , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[int] = layers_per_block
        SCREAMING_SNAKE_CASE : int = torch.nn.Conv2d(
lowerCamelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList([] )
# down
SCREAMING_SNAKE_CASE : Tuple = block_out_channels[0]
for i, down_block_type in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Any = output_channel
SCREAMING_SNAKE_CASE : List[str] = block_out_channels[i]
SCREAMING_SNAKE_CASE : Union[str, Any] = i == len(lowerCamelCase_ ) - 1
SCREAMING_SNAKE_CASE : Optional[Any] = get_down_block(
lowerCamelCase_ , num_layers=self.layers_per_block , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
self.down_blocks.append(lowerCamelCase_ )
# mid
        SCREAMING_SNAKE_CASE : Union[str, Any] = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
# out
SCREAMING_SNAKE_CASE : List[Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCamelCase_ , eps=1e-6 )
SCREAMING_SNAKE_CASE : List[Any] = nn.SiLU()
SCREAMING_SNAKE_CASE : Dict = 2 * out_channels if double_z else out_channels
        SCREAMING_SNAKE_CASE : List[Any] = nn.Conv2d(block_out_channels[-1] , lowerCamelCase_ , 3 , padding=1 )
SCREAMING_SNAKE_CASE : Tuple = False
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = x
SCREAMING_SNAKE_CASE : int = self.conv_in(lowerCamelCase_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase_ : List[Any] ):
def custom_forward(*lowerCamelCase_ : List[str] ):
return module(*lowerCamelCase_ )
return custom_forward
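            # gradient checkpointing recomputes each block's forward pass
            # during backprop instead of storing activations, trading compute
            # for memory; the wrapper above gives torch.utils.checkpoint the
            # positional-args callable it expects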
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
else:
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCamelCase_ )
else:
# down
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : Tuple = down_block(lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : List[Any] = self.mid_block(lowerCamelCase_ )
# post-process
SCREAMING_SNAKE_CASE : Optional[Any] = self.conv_norm_out(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = self.conv_act(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self.conv_out(lowerCamelCase_ )
return sample
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : str=("UpDecoderBlock2D",) , lowerCamelCase_ : Union[str, Any]=(64,) , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : Dict="silu" , lowerCamelCase_ : Any="group" , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : int = layers_per_block
        SCREAMING_SNAKE_CASE : Optional[Any] = nn.Conv2d(
lowerCamelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Any = nn.ModuleList([] )
SCREAMING_SNAKE_CASE : str = in_channels if norm_type == """spatial""" else None
# mid
        SCREAMING_SNAKE_CASE : Dict = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
# up
SCREAMING_SNAKE_CASE : Union[str, Any] = list(reversed(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Any = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = output_channel
SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i]
SCREAMING_SNAKE_CASE : List[str] = i == len(lowerCamelCase_ ) - 1
SCREAMING_SNAKE_CASE : List[Any] = get_up_block(
lowerCamelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , prev_output_channel=lowerCamelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , resnet_time_scale_shift=lowerCamelCase_ , )
self.up_blocks.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = output_channel
# out
if norm_type == "spatial":
SCREAMING_SNAKE_CASE : List[Any] = SpatialNorm(block_out_channels[0] , lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCamelCase_ , eps=1e-6 )
SCREAMING_SNAKE_CASE : Dict = nn.SiLU()
        SCREAMING_SNAKE_CASE : str = nn.Conv2d(block_out_channels[0] , lowerCamelCase_ , 3 , padding=1 )
SCREAMING_SNAKE_CASE : Dict = False
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : str=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = z
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_in(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase_ : List[str] ):
def custom_forward(*lowerCamelCase_ : str ):
return module(*lowerCamelCase_ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
SCREAMING_SNAKE_CASE : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
else:
# middle
SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ )
else:
# middle
SCREAMING_SNAKE_CASE : Any = self.mid_block(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Any = up_block(lowerCamelCase_ , lowerCamelCase_ )
# post-process
if latent_embeds is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_norm_out(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_act(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.conv_out(lowerCamelCase_ )
return sample
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int=None , lowerCamelCase_ : Any="random" , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : List[Any]=True ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Tuple = n_e
SCREAMING_SNAKE_CASE : int = vq_embed_dim
SCREAMING_SNAKE_CASE : Tuple = beta
SCREAMING_SNAKE_CASE : Union[str, Any] = legacy
SCREAMING_SNAKE_CASE : int = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
SCREAMING_SNAKE_CASE : Optional[Any] = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
SCREAMING_SNAKE_CASE : Tuple = self.used.shape[0]
SCREAMING_SNAKE_CASE : Any = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
SCREAMING_SNAKE_CASE : Union[str, Any] = self.re_embed
SCREAMING_SNAKE_CASE : Any = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
SCREAMING_SNAKE_CASE : Optional[int] = n_e
SCREAMING_SNAKE_CASE : Any = sane_index_shape
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = inds.shape
assert len(lowerCamelCase_ ) > 1
SCREAMING_SNAKE_CASE : Tuple = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long()
SCREAMING_SNAKE_CASE : Union[str, Any] = match.argmax(-1 )
SCREAMING_SNAKE_CASE : Tuple = match.sum(2 ) < 1
if self.unknown_index == "random":
SCREAMING_SNAKE_CASE : Tuple = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
SCREAMING_SNAKE_CASE : Any = self.unknown_index
return new.reshape(lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = inds.shape
assert len(lowerCamelCase_ ) > 1
SCREAMING_SNAKE_CASE : str = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ )
if self.re_embed > self.used.shape[0]: # extra token
SCREAMING_SNAKE_CASE : List[Any] = 0 # simply set to zero
SCREAMING_SNAKE_CASE : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCamelCase_ )
return back.reshape(lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous()
SCREAMING_SNAKE_CASE : int = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
SCREAMING_SNAKE_CASE : Any = torch.argmin(torch.cdist(lowerCamelCase_ , self.embedding.weight ) , dim=1 )
SCREAMING_SNAKE_CASE : Tuple = self.embedding(lowerCamelCase_ ).view(z.shape )
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : List[str] = None
# compute loss for embedding
if not self.legacy:
SCREAMING_SNAKE_CASE : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
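        # both branches implement the VQ-VAE codebook + commitment objective:
        # one stop-gradient term updates the codebook, the other pulls encoder
        # outputs toward their chosen codes; `legacy` only swaps which term
        # `beta` scales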
# preserve gradients
SCREAMING_SNAKE_CASE : Tuple = z + (z_q - z).detach()
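        # straight-through estimator: the forward pass uses the quantized
        # value while the backward pass copies gradients from z_q to z
        # unchanged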
# reshape back to match original input shape
SCREAMING_SNAKE_CASE : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE : List[Any] = self.remap_to_used(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ):
'''simple docstring'''
if self.remap is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE : List[Any] = self.unmap_to_all(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
SCREAMING_SNAKE_CASE : str = self.embedding(lowerCamelCase_ )
if shape is not None:
SCREAMING_SNAKE_CASE : List[str] = z_q.view(lowerCamelCase_ )
# reshape back to match original input shape
SCREAMING_SNAKE_CASE : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class UpperCamelCase__ ( object ):
    """simple docstring"""
    def __init__( self , parameters , deterministic=False ):
        '''simple docstring'''
        self.parameters = parameters
        self.mean , self.logvar = torch.chunk(parameters , 2 , dim=1 )
        self.logvar = torch.clamp(self.logvar , -30.0 , 20.0 )
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar )
        self.var = torch.exp(self.logvar )
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
    def sample( self , generator: Optional[torch.Generator] = None ):
        '''simple docstring'''
        sample = randn_tensor(
            self.mean.shape , generator=generator , device=self.parameters.device , dtype=self.parameters.dtype )
        x = self.mean + self.std * sample
        return x
    def kl( self , other=None ):
        '''simple docstring'''
        if self.deterministic:
            return torch.Tensor([0.0] )
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean , 2 ) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar , dim=[1, 2, 3] , )
    def nll( self , sample , dims=[1, 2, 3] ):
        '''simple docstring'''
        if self.deterministic:
            return torch.Tensor([0.0] )
        logtwopi = np.log(2.0 * np.pi )
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=dims )
    def mode( self ):
        '''simple docstring'''
        return self.mean
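# Standalone sketch of the reparameterization trick behind `sample()` above:
# draw eps ~ N(0, I) and return mean + std * eps, so gradients flow back to
# the Gaussian parameters (shapes illustrative, not tied to this module):
#
#     mean, logvar = torch.zeros(3), torch.zeros(3)
#     std = torch.exp(0.5 * logvar)
#     sample = mean + std * torch.randn_like(mean)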
| 323 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : List[str] = logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] = {
"""facebook/xlm-roberta-xl""": """https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json""",
"""facebook/xlm-roberta-xxl""": """https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json""",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = """xlm-roberta-xl"""
def __init__( self : List[str] , UpperCAmelCase : Dict=250880 , UpperCAmelCase : int=2560 , UpperCAmelCase : Tuple=36 , UpperCAmelCase : Dict=32 , UpperCAmelCase : Dict=10240 , UpperCAmelCase : List[Any]="gelu" , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : int=514 , UpperCAmelCase : Any=1 , UpperCAmelCase : Dict=0.0_2 , UpperCAmelCase : Union[str, Any]=1e-05 , UpperCAmelCase : List[Any]=1 , UpperCAmelCase : Any=0 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : List[Any]="absolute" , UpperCAmelCase : Tuple=True , UpperCAmelCase : Optional[Any]=None , **UpperCAmelCase : Any , ) -> List[str]:
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase__ : Dict = vocab_size
lowerCamelCase__ : str = hidden_size
lowerCamelCase__ : str = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Dict = intermediate_size
lowerCamelCase__ : Any = hidden_dropout_prob
lowerCamelCase__ : List[str] = attention_probs_dropout_prob
lowerCamelCase__ : Tuple = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : Optional[Any] = initializer_range
lowerCamelCase__ : List[str] = layer_norm_eps
lowerCamelCase__ : Any = position_embedding_type
lowerCamelCase__ : int = use_cache
lowerCamelCase__ : int = classifier_dropout
class lowerCAmelCase ( __UpperCamelCase ):
@property
def A_ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCamelCase__ : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCamelCase__ : Dict = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
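# The mapping above becomes the ONNX exporter's dynamic-axes spec: for the
# default task, {"input_ids": {0: "batch", 1: "sequence"}, ...} lets batch
# size and sequence length vary at inference time.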
| 50 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaxAutoencoderKL
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 4
SCREAMING_SNAKE_CASE : str = 3
SCREAMING_SNAKE_CASE : List[Any] = (32, 32)
SCREAMING_SNAKE_CASE : Tuple = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE : Any = jax.random.uniform(lowerCamelCase_ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_input
return init_dict, inputs_dict
| 323 | 0 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case :
def __init__( self : Tuple , _snake_case : Tuple , _snake_case : List[Any]=13 , _snake_case : Dict=7 , _snake_case : List[str]=True , _snake_case : Dict=True , _snake_case : int=True , _snake_case : Tuple=True , _snake_case : Any=99 , _snake_case : Dict=24 , _snake_case : List[Any]=2 , _snake_case : Optional[int]=6 , _snake_case : str=37 , _snake_case : List[str]="gelu" , _snake_case : int=0.1 , _snake_case : Any=0.1 , _snake_case : Optional[Any]=512 , _snake_case : List[Any]=16 , _snake_case : Dict=2 , _snake_case : Dict=0.0_2 , _snake_case : Optional[int]=3 , _snake_case : str=None , _snake_case : List[str]=1000 , ):
"""simple docstring"""
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = scope
UpperCAmelCase_ = range_bbox
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox)
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase_ = bbox[i, j, 3]
UpperCAmelCase_ = bbox[i, j, 1]
UpperCAmelCase_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase_ = bbox[i, j, 2]
UpperCAmelCase_ = bbox[i, j, 0]
UpperCAmelCase_ = t
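        # the swaps above enforce x0 <= x1 and y0 <= y1, so every sampled bbox
        # is a valid (x0, y0, x1, y1) rectangle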
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
UpperCAmelCase_ = None
if self.use_token_type_ids:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
UpperCAmelCase_ = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCamelCase ( self : Any):
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def lowerCamelCase ( self : str , _snake_case : Dict , _snake_case : int , _snake_case : Union[str, Any] , _snake_case : List[Any] , _snake_case : Tuple , _snake_case : Dict , _snake_case : int , ):
"""simple docstring"""
UpperCAmelCase_ = LiltModel(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case)
UpperCAmelCase_ = model(_snake_case , bbox=_snake_case , token_type_ids=_snake_case)
UpperCAmelCase_ = model(_snake_case , bbox=_snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def lowerCamelCase ( self : List[str] , _snake_case : Tuple , _snake_case : Dict , _snake_case : Tuple , _snake_case : int , _snake_case : Tuple , _snake_case : str , _snake_case : Tuple , ):
"""simple docstring"""
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = LiltForTokenClassification(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(
_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def lowerCamelCase ( self : Any , _snake_case : List[Any] , _snake_case : List[str] , _snake_case : str , _snake_case : int , _snake_case : str , _snake_case : Optional[Any] , _snake_case : Optional[int] , ):
"""simple docstring"""
UpperCAmelCase_ = LiltForQuestionAnswering(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(
_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , start_positions=_snake_case , end_positions=_snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def lowerCamelCase ( self : int):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __snake_case ( a , a , a , unittest.TestCase ):
UpperCAmelCase__ : Tuple = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Tuple = (
{
'''feature-extraction''': LiltModel,
'''question-answering''': LiltForQuestionAnswering,
'''text-classification''': LiltForSequenceClassification,
'''token-classification''': LiltForTokenClassification,
'''zero-shot''': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : str = False
def lowerCamelCase ( self : Optional[int] , _snake_case : List[Any] , _snake_case : List[Any] , _snake_case : str , _snake_case : Dict , _snake_case : Any):
"""simple docstring"""
return True
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = LiltModelTester(self)
UpperCAmelCase_ = ConfigTester(self , config_class=_snake_case , hidden_size=37)
def lowerCamelCase ( self : str):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ = type
self.model_tester.create_and_check_model(*_snake_case)
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case)
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_snake_case)
@slow
def lowerCamelCase ( self : int):
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = LiltModel.from_pretrained(_snake_case)
self.assertIsNotNone(_snake_case)
@require_torch
@slow
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''').to(_snake_case)
UpperCAmelCase_ = torch.tensor([[1, 2]] , device=_snake_case)
UpperCAmelCase_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=_snake_case)
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(input_ids=_snake_case , bbox=_snake_case)
UpperCAmelCase_ = torch.Size([1, 2, 768])
UpperCAmelCase_ = torch.tensor(
[[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=_snake_case , )
self.assertTrue(outputs.last_hidden_state.shape , _snake_case)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , _snake_case , atol=1e-3))
| 51 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase__ :
"""simple docstring"""
@staticmethod
def lowerCamelCase_ ( *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : List[str] ):
'''simple docstring'''
pass
def hashimage(image):
    """simple docstring"""
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
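# e.g. hashimage(Image.new("RGB", (2, 2))) yields a 32-character hex digest;
# the slow test below computes it for the rendered depth map so images can be
# compared deterministically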
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = DepthEstimationPipeline(model=lowerCamelCase_ , image_processor=lowerCamelCase_ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} , lowerCamelCase_ )
import datasets
SCREAMING_SNAKE_CASE : List[str] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
SCREAMING_SNAKE_CASE : Any = depth_estimator(
[
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
] )
self.assertEqual(
[
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
] , lowerCamelCase_ , )
@require_tf
@unittest.skip("""Depth estimation is not implemented in TF""" )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
@slow
@require_torch
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = """Intel/dpt-large"""
SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline("""depth-estimation""" , model=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
SCREAMING_SNAKE_CASE : str = hashimage(outputs["""depth"""] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.662 )
@require_torch
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.skipTest("""There is not hf-internal-testing tiny model for either GLPN nor DPT""" )
| 323 | 0 |
from collections.abc import Callable
import numpy as np
def explicit_euler(
    ode_func: Callable, ya: float, xa: float, x_end: float, step_size: float
) -> np.ndarray:
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        y[k + 1] = y[k] + step_size * ode_func(x , y[k] )
        x += step_size
    return y
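# Worked example: for y' = y with y(0) = 1, explicit Euler with step 0.01
# gives y(1) = (1.01)**100 ~ 2.7048, an underestimate of e ~ 2.71828 that
# shrinks as the step size does:
#
#     explicit_euler(lambda x, y: y, 1.0, 0.0, 1.0, 0.01)[-1]  # ~2.7048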
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=13 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Dict=2_24 , lowerCamelCase_ : List[Any]=30 , lowerCamelCase_ : Union[str, Any]=4_00 , lowerCamelCase_ : str=True , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 18}
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : Any = num_channels
SCREAMING_SNAKE_CASE : str = image_size
SCREAMING_SNAKE_CASE : Dict = min_resolution
SCREAMING_SNAKE_CASE : List[str] = max_resolution
SCREAMING_SNAKE_CASE : str = do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size
SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize
SCREAMING_SNAKE_CASE : List[Any] = image_mean
SCREAMING_SNAKE_CASE : str = image_std
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ViTImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )
@property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : List[str] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Tuple = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : Any = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
| 323 | 0 |
'''simple docstring'''
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class snake_case ( Dataset ):
    """simple docstring"""
    def __init__( self , path='' , prefix='train' ):
        assert os.path.isdir(path )
        self.documents = []
        story_filenames_list = os.listdir(path )
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path , story_filename )
            if not os.path.isfile(path_to_story ):
                continue
            self.documents.append(path_to_story )
    def __len__( self ):
        return len(self.documents )
    def __getitem__( self , idx ):
        document_path = self.documents[idx]
        document_name = document_path.split('/' )[-1]
        with open(document_path , encoding='utf-8' ) as source:
            raw_story = source.read()
        story_lines , summary_lines = process_story(raw_story )
        return document_name, story_lines, summary_lines
def process_story(raw_story ):
    """simple docstring"""
    nonempty_lines = list(filter(lambda x : len(x ) != 0 , [line.strip() for line in raw_story.split('\n' )] ) )
    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line ) for line in nonempty_lines]
    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines )
    while True:
        try:
            element = lines.popleft()
            if element.startswith('@highlight' ):
                break
            story_lines.append(element )
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []
    # gather summary lines
    summary_lines = list(filter(lambda t : not t.startswith('@highlight' ) , lines ) )
    return story_lines, summary_lines
def _add_missing_period(line ):
    """simple docstring"""
    END_TOKENS = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u2019', ')']
    if line.startswith('@highlight' ):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def fit_to_block_size(sequence , block_size , pad_token_id ):
    """simple docstring"""
    if len(sequence ) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence )) )
        return sequence
def build_mask(sequence , pad_token_id ):
    """simple docstring"""
    mask = torch.ones_like(sequence )
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def encode_for_summarization(story_lines , summary_lines , tokenizer ):
    """simple docstring"""
    story_lines_token_ids = [tokenizer.encode(line ) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line ) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def compute_token_type_ids(batch , separator_token_id ):
    """simple docstring"""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2 )
        batch_embeddings.append(embeddings )
    return torch.tensor(batch_embeddings )
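# Tiny illustration of the padding/masking helpers above (pad_token_id = 0,
# separator_token_id = 102; values illustrative):
#
#     fit_to_block_size([5, 6], 4, 0)                    # -> [5, 6, 0, 0]
#     build_mask(torch.tensor([5, 6, 0, 0]), 0)          # -> tensor([1, 1, 0, 0])
#     compute_token_type_ids([[102, 5, 6, 102, 7]], 102) # -> tensor([[0, 0, 0, 1, 1]])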
| 53 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
__UpperCAmelCase = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
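# _LazyModule defers the heavy submodule imports listed above until an
# attribute is first accessed; the TYPE_CHECKING branch keeps the eager
# imports visible to type checkers without paying the runtime import cost.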
| 323 | 0 |
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def get_imdb_top_aaa_movies(url: str = "" ) -> dict[str, float]:
    '''simple docstring'''
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url ).text , "html.parser" )
    titles = soup.find_all("td" , attrs="titleColumn" )
    ratings = soup.find_all("td" , class_="ratingColumn imdbRating" )
    return {
        title.a.text: float(rating.strong.text )
        for title, rating in zip(titles , ratings )
    }
def write_movies(filename: str = "IMDb_Top_250_Movies.csv" ) -> None:
    '''simple docstring'''
    movies = get_imdb_top_aaa_movies()
    with open(filename , "w" , newline="" ) as out_file:
        writer = csv.writer(out_file )
        writer.writerow(["Movie title", "IMDb rating"] )
        for title, rating in movies.items():
            writer.writerow([title, rating] )
if __name__ == "__main__":
    write_movies()
| 54 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 323 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class snake_case :
"""simple docstring"""
def __init__( self , UpperCamelCase , UpperCamelCase=3 , UpperCamelCase=32 , UpperCamelCase=3 , UpperCamelCase=10 , UpperCamelCase=[8, 16, 32, 64] , UpperCamelCase=[1, 1, 2, 1] , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase="relu" , UpperCamelCase=3 , UpperCamelCase=None , UpperCamelCase=["stage2", "stage3", "stage4"] , UpperCamelCase=[2, 3, 4] , UpperCamelCase=1 , ):
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = embeddings_size
lowerCamelCase_ = hidden_sizes
lowerCamelCase_ = depths
lowerCamelCase_ = is_training
lowerCamelCase_ = use_labels
lowerCamelCase_ = hidden_act
lowerCamelCase_ = num_labels
lowerCamelCase_ = scope
lowerCamelCase_ = len(UpperCamelCase )
lowerCamelCase_ = out_features
lowerCamelCase_ = out_indices
lowerCamelCase_ = num_groups
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def snake_case ( self ):
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = BitModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCamelCase_ = model(UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = BitForImageClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCamelCase_ = model(UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = BitBackbone(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCamelCase_ = model(UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase_ = None
lowerCamelCase_ = BitBackbone(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCamelCase_ = model(UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
_lowerCamelCase = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = BitModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self ):
"""simple docstring"""
return
@unittest.skip(reason="Bit does not output attentions" )
def snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="Bit does not use inputs_embeds" )
def snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="Bit does not support input and output embeddings" )
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(UpperCamelCase )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(config=UpperCamelCase )
for name, module in model.named_modules():
                if isinstance(UpperCamelCase , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def snake_case ( self ):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
lowerCamelCase_ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
lowerCamelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase_ = self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCamelCase_ = layer_type
lowerCamelCase_ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
@unittest.skip(reason="Bit does not use feedforward chunking" )
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase )
@slow
def snake_case ( self ):
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = BitModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def __snake_case ( ):
lowerCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self ):
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def snake_case ( self ):
"""simple docstring"""
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-0.6_526, -0.5_263, -1.4_398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class snake_case ( lowercase , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False
def snake_case ( self ):
"""simple docstring"""
        self.model_tester = BitModelTester(self)
| 55 |
'''simple docstring'''
def set_bit(number, position):
    """Set the bit of `number` at `position` to 1."""
    return number | (1 << position)
def clear_bit(number, position):
    """Set the bit of `number` at `position` to 0."""
    return number & ~(1 << position)
def flip_bit(number, position):
    """Toggle the bit of `number` at `position`."""
    return number ^ (1 << position)
def is_bit_set(number, position):
    """Return True if the bit of `number` at `position` is 1."""
    return ((number >> position) & 1) == 1
def get_bit(number, position):
    """Return the bit of `number` at `position` as 0 or 1."""
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
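# A quick sanity check of the helpers above (13 == 0b1101; values chosen for illustration):
# set_bit(13, 1)    -> 15 (0b1111)
# clear_bit(13, 2)  -> 9  (0b1001)
# flip_bit(13, 1)   -> 15 (0b1111)
# is_bit_set(13, 0) -> True
# get_bit(13, 1)    -> 0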
| 323 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a : int = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    a['modeling_altclip'] = [
        'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'AltCLIPPreTrainedModel',
        'AltCLIPModel',
        'AltCLIPTextModel',
        'AltCLIPVisionModel',
    ]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], a, module_spec=__spec__)
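# `a` plays the role of the usual `_import_structure` dict: with the
# sys.modules replacement above, the torch-backed AltCLIP classes are only
# imported on first attribute access, keeping the package import cheap.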
| 56 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str=14 , lowerCamelCase_ : Optional[Any]=7 , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=True , lowerCamelCase_ : str=False , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : int=99 , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : int=4 , lowerCamelCase_ : List[Any]=4 , lowerCamelCase_ : List[str]=4 , lowerCamelCase_ : Union[str, Any]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Union[str, Any]=0.1 , lowerCamelCase_ : List[str]=5_12 , lowerCamelCase_ : Union[str, Any]=0.02 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = parent
SCREAMING_SNAKE_CASE : Optional[int] = batch_size
SCREAMING_SNAKE_CASE : Any = seq_length
SCREAMING_SNAKE_CASE : List[str] = is_training
SCREAMING_SNAKE_CASE : Optional[int] = use_input_mask
SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : List[Any] = rotary_dim
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Dict = vocab_size - 1
SCREAMING_SNAKE_CASE : str = vocab_size - 1
SCREAMING_SNAKE_CASE : List[Any] = vocab_size - 1
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : List[str] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = config_and_inputs
SCREAMING_SNAKE_CASE : Tuple = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = 20
SCREAMING_SNAKE_CASE : Any = model_class_name(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model.init_cache(input_ids.shape[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
SCREAMING_SNAKE_CASE : Optional[int] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE : Any = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
SCREAMING_SNAKE_CASE : str = model(
input_ids[:, -1:] , attention_mask=lowerCamelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = 20
SCREAMING_SNAKE_CASE : Dict = model_class_name(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
SCREAMING_SNAKE_CASE : str = model.init_cache(input_ids.shape[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE : Any = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
SCREAMING_SNAKE_CASE : Dict = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
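    # Both cache checks above exercise Flax incremental decoding: init_cache
    # pre-allocates past_key_values for max_decoder_length, a prefill pass
    # consumes input_ids[:, :-1], a second pass feeds only the final token with
    # the cached keys/values, and the result must match a full forward pass to
    # within 1e-3 on the last-position logits.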
@require_flax
class UpperCamelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
SCREAMING_SNAKE_CASE__ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxGPTJModelTester(self )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
@tooslow
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=lowerCamelCase_ , truncation=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : Optional[Any] = model.config.eos_token_id
SCREAMING_SNAKE_CASE : str = jax.jit(model.generate )
SCREAMING_SNAKE_CASE : str = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences
SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@is_pt_flax_cross_test
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE : str = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape
SCREAMING_SNAKE_CASE : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Optional[int] = 1
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
SCREAMING_SNAKE_CASE : Optional[int] = pt_model_class(lowerCamelCase_ ).eval()
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = fx_state
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = pt_model(**lowerCamelCase_ ).to_tuple()
SCREAMING_SNAKE_CASE : Any = fx_model(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = model_class.from_pretrained(lowerCamelCase_ , from_pt=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = fx_model_loaded(**lowerCamelCase_ ).to_tuple()
self.assertEqual(
len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = pt_model_class(lowerCamelCase_ ).eval()
SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : List[Any] = load_flax_weights_in_pytorch_model(lowerCamelCase_ , fx_model.params )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
SCREAMING_SNAKE_CASE : Dict = 1
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : Tuple = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = pt_model(**lowerCamelCase_ ).to_tuple()
SCREAMING_SNAKE_CASE : Optional[Any] = fx_model(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = pt_model_class.from_pretrained(lowerCamelCase_ , from_flax=lowerCamelCase_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = pt_model_loaded(**lowerCamelCase_ ).to_tuple()
self.assertEqual(
len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" )
SCREAMING_SNAKE_CASE : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase_ )
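    # The cross-framework tests above follow one pattern: convert weights with
    # convert_pytorch_state_dict_to_flax / load_flax_weights_in_pytorch_model,
    # run both models on identical inputs, and require the last-position logits
    # to agree within 4e-2 -- a loose tolerance that absorbs framework numerics.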
| 323 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A : Optional[Any] = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replace the key by subtracting the offset from the original block number."""
    to_find = original_name.split("." )[0]
    key_list = key.split("." )
    orig_block_num = int(key_list[key_list.index(to_find ) - 2] )
    layer_num = int(key_list[key_list.index(to_find ) - 1] )
    new_block_num = orig_block_num - offset
    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}" , f"block.{new_block_num}.{layer_num}.{new_name}" )
    return key
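# e.g. replace_key_with_offset("poolformer.encoder.2.3.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
# -> "poolformer.encoder.block.1.3.output.conv1.weight" (block index 2 shifted down by offset 1)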
def rename_keys(state_dict):
    """Map the original PoolFormer state-dict keys onto the HuggingFace layout."""
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network" ):
            key = key.replace("network" , "poolformer.encoder" )
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias" ) and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj" )]
            key = key.replace(to_replace , f"patch_embeddings.{total_embed_found}." )
            key = key.replace("proj" , "projection" )
            if key.endswith("bias" ):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , "mlp.fc1" , "output.conv1" )
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , "mlp.fc2" , "output.conv2" )
        if "norm1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , "norm1" , "before_norm" )
        if "norm2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , "norm2" , "after_norm" )
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , "layer_scale_1" , "layer_scale_1" )
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , "layer_scale_2" , "layer_scale_2" )
        if "head" in key:
            key = key.replace("head" , "classifier" )
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """Download a test image on which the converted model is verified."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original weights into our PoolFormer structure."""
    # define default PoolFormer configuration
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported" )
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors="pt" ).pixel_values
    logger.info(f"Converting model {model_name}..." )
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device("cpu" ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    pixel_values = image_processor(images=prepare_img() , return_tensors="pt" ).pixel_values
    # forward pass
    outputs = model(pixel_values )
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.30_45, -0.67_58, -0.48_69] )
    elif size == "s24":
        expected_slice = torch.tensor([0.44_02, -0.13_74, -0.80_45] )
    elif size == "s36":
        expected_slice = torch.tensor([-0.60_80, -0.51_33, -0.58_98] )
    elif size == "m36":
        expected_slice = torch.tensor([0.39_52, 0.22_63, -1.26_68] )
    elif size == "m48":
        expected_slice = torch.tensor([0.11_67, -0.06_56, -0.34_23] )
    else:
        raise ValueError(f"Size {size} not supported" )
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3] , expected_slice , atol=1e-2 )
    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
A : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="poolformer_s12",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
A : int = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
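    # Example invocation (script name and paths illustrative):
    #   python convert_poolformer_checkpoint.py --model_name poolformer_s12 \
    #       --checkpoint_path poolformer_s12.pth.tar --pytorch_dump_folder_path ./poolformer_s12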
| 57 |
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase__ ( lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [R'''h\.\d+\.attn\.bias''', R'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : int = 5_02_57 , lowerCamelCase_ : int = 10_24 , lowerCamelCase_ : int = 7_68 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : str = "gelu_new" , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 1e-5 , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[int] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
                f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'''
f''' `n_embd`: {n_embd} are not equal.''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = prefix_inner_dim
SCREAMING_SNAKE_CASE : List[str] = prefix_hidden_dim
SCREAMING_SNAKE_CASE : Tuple = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
SCREAMING_SNAKE_CASE : str = (
nn.Linear(self.prefix_hidden_dim , lowerCamelCase_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
SCREAMING_SNAKE_CASE : Any = GPTaConfig(
vocab_size=lowerCamelCase_ , n_positions=lowerCamelCase_ , n_embd=lowerCamelCase_ , n_layer=lowerCamelCase_ , n_head=lowerCamelCase_ , n_inner=lowerCamelCase_ , activation_function=lowerCamelCase_ , resid_pdrop=lowerCamelCase_ , embd_pdrop=lowerCamelCase_ , attn_pdrop=lowerCamelCase_ , layer_norm_epsilon=lowerCamelCase_ , initializer_range=lowerCamelCase_ , scale_attn_weights=lowerCamelCase_ , use_cache=lowerCamelCase_ , scale_attn_by_inverse_layer_idx=lowerCamelCase_ , reorder_and_upcast_attn=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = GPTaLMHeadModel(lowerCamelCase_ )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : Optional[torch.Tensor] = None , lowerCamelCase_ : Optional[torch.Tensor] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.transformer.transformer.wte(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.encode_prefix(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self.decode_prefix(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
SCREAMING_SNAKE_CASE : Dict = torch.cat((dummy_token, input_ids) , dim=1 )
SCREAMING_SNAKE_CASE : str = self.transformer(inputs_embeds=lowerCamelCase_ , labels=lowerCamelCase_ , attention_mask=lowerCamelCase_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : torch.device ):
'''simple docstring'''
return torch.zeros(lowerCamelCase_ , self.prefix_length , dtype=torch.intaa , device=lowerCamelCase_ )
def lowerCamelCase_ ( self : str , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
return self.encode_prefix(lowerCamelCase_ )
@torch.no_grad()
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = torch.split(lowerCamelCase_ , 1 , dim=0 )
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : Tuple = []
for feature in features:
SCREAMING_SNAKE_CASE : Optional[int] = self.decode_prefix(feature.to(lowerCamelCase_ ) ) # back to the clip feature
# Only support beam search for now
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.generate_beam(
input_embeds=lowerCamelCase_ , device=lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = torch.stack(lowerCamelCase_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : int=None , lowerCamelCase_ : int = 5 , lowerCamelCase_ : int = 67 , lowerCamelCase_ : float = 1.0 , lowerCamelCase_ : Optional[int] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = eos_token_id
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.int )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.bool )
if input_embeds is not None:
SCREAMING_SNAKE_CASE : Dict = input_embeds
else:
SCREAMING_SNAKE_CASE : Dict = self.transformer.transformer.wte(lowerCamelCase_ )
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Optional[int] = self.transformer(inputs_embeds=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = outputs.logits
SCREAMING_SNAKE_CASE : Optional[int] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
SCREAMING_SNAKE_CASE : Any = logits.softmax(-1 ).log()
if scores is None:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = logits.topk(lowerCamelCase_ , -1 )
SCREAMING_SNAKE_CASE : Optional[Any] = generated.expand(lowerCamelCase_ , *generated.shape[1:] )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
SCREAMING_SNAKE_CASE : List[Any] = next_tokens
else:
SCREAMING_SNAKE_CASE : Dict = tokens.expand(lowerCamelCase_ , *tokens.shape[1:] )
SCREAMING_SNAKE_CASE : str = torch.cat((tokens, next_tokens) , dim=1 )
else:
SCREAMING_SNAKE_CASE : Tuple = -float(np.inf )
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Dict = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
SCREAMING_SNAKE_CASE : List[str] = scores_sum / seq_lengths[:, None]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = scores_sum_average.view(-1 ).topk(lowerCamelCase_ , -1 )
SCREAMING_SNAKE_CASE : str = next_tokens // scores_sum.shape[1]
SCREAMING_SNAKE_CASE : Tuple = seq_lengths[next_tokens_source]
SCREAMING_SNAKE_CASE : int = next_tokens % scores_sum.shape[1]
SCREAMING_SNAKE_CASE : Dict = next_tokens.unsqueeze(1 )
SCREAMING_SNAKE_CASE : Dict = tokens[next_tokens_source]
SCREAMING_SNAKE_CASE : Any = torch.cat((tokens, next_tokens) , dim=1 )
SCREAMING_SNAKE_CASE : List[str] = generated[next_tokens_source]
SCREAMING_SNAKE_CASE : Optional[Any] = scores_sum_average * seq_lengths
SCREAMING_SNAKE_CASE : Any = is_stopped[next_tokens_source]
SCREAMING_SNAKE_CASE : Dict = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
SCREAMING_SNAKE_CASE : str = torch.cat((generated, next_token_embed) , dim=1 )
SCREAMING_SNAKE_CASE : Dict = is_stopped + next_tokens.eq(lowerCamelCase_ ).squeeze()
if is_stopped.all():
break
SCREAMING_SNAKE_CASE : int = scores / seq_lengths
SCREAMING_SNAKE_CASE : Dict = scores.argsort(descending=lowerCamelCase_ )
# tokens tensors are already padded to max_seq_length
SCREAMING_SNAKE_CASE : Union[str, Any] = [tokens[i] for i in order]
SCREAMING_SNAKE_CASE : Dict = torch.stack(lowerCamelCase_ , dim=0 )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
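    # Sketch of generate_beam above (shapes assumed): `generated` holds the
    # running beam embeddings and `scores` the cumulative log-probs. Each step
    # scores only the last position, ranks candidates by average log-prob
    # (scores_sum / seq_lengths), reorders tokens and lengths by each
    # candidate's source beam, and appends the new token embedding; decoding
    # stops once every beam has emitted the stop token, and beams are finally
    # sorted by length-normalized score.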
| 323 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class a_ :
'''simple docstring'''
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=32 , A=2 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = 13
_SCREAMING_SNAKE_CASE = 7
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = 99
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = 37
_SCREAMING_SNAKE_CASE = """gelu"""
_SCREAMING_SNAKE_CASE = 0.1
_SCREAMING_SNAKE_CASE = 0.1
_SCREAMING_SNAKE_CASE = 512
_SCREAMING_SNAKE_CASE = 16
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = 0.02
_SCREAMING_SNAKE_CASE = 3
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = None
def snake_case_( self ) -> Any:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_( self , A , A , A , A , A , A , A ) -> Tuple:
_SCREAMING_SNAKE_CASE = TFRoFormerModel(config=A )
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_SCREAMING_SNAKE_CASE = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE = model(A )
_SCREAMING_SNAKE_CASE = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_( self , A , A , A , A , A , A , A ) -> List[Any]:
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = TFRoFormerForCausalLM(config=A )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A )["""logits"""]
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def snake_case_( self , A , A , A , A , A , A , A ) -> int:
_SCREAMING_SNAKE_CASE = TFRoFormerForMaskedLM(config=A )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_( self , A , A , A , A , A , A , A ) -> int:
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFRoFormerForSequenceClassification(config=A )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_( self , A , A , A , A , A , A , A ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = self.num_choices
_SCREAMING_SNAKE_CASE = TFRoFormerForMultipleChoice(config=A )
_SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case_( self , A , A , A , A , A , A , A ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFRoFormerForTokenClassification(config=A )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case_( self , A , A , A , A , A , A , A ) -> List[Any]:
_SCREAMING_SNAKE_CASE = TFRoFormerForQuestionAnswering(config=A )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case_( self ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
def snake_case_( self , A , A , A , A , A ) -> List[Any]:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def snake_case_( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = TFRoFormerModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A , hidden_size=37 )
def snake_case_( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def snake_case_( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def snake_case_( self ) -> str:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A )
def snake_case_( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*A )
def snake_case_( self ) -> Any:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A )
def snake_case_( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A )
def snake_case_( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A )
def snake_case_( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A )
@slow
def snake_case_( self ) -> int:
_SCREAMING_SNAKE_CASE = TFRoFormerModel.from_pretrained("""junnyu/roformer_chinese_base""" )
self.assertIsNotNone(A )
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case_( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = TFRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
_SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 2, 3, 4, 5]] )
_SCREAMING_SNAKE_CASE = model(A )[0]
# TODO Replace vocab size
_SCREAMING_SNAKE_CASE = 5_0000
_SCREAMING_SNAKE_CASE = [1, 6, vocab_size]
self.assertEqual(output.shape , A )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
_SCREAMING_SNAKE_CASE = tf.constant(
[
[
[-0.1205_3341, -1.026_4901, 0.2922_1946],
[-1.513_3783, 0.19_7433, 0.1519_0607],
[-5.013_5403, -3.90_0256, -0.8403_8764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , A , atol=1e-4 )
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = 1E-4
def snake_case_( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = tf.constant([[4, 10]] )
_SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
_SCREAMING_SNAKE_CASE = emba(input_ids.shape )
_SCREAMING_SNAKE_CASE = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(A , A , atol=self.tolerance )
def snake_case_( self ) -> Any:
_SCREAMING_SNAKE_CASE = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
_SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
_SCREAMING_SNAKE_CASE = emba.weight[:3, :5]
tf.debugging.assert_near(A , A , atol=self.tolerance )
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = 1E-4
def snake_case_( self ) -> Union[str, Any]:
# 2,12,16,64
_SCREAMING_SNAKE_CASE = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
_SCREAMING_SNAKE_CASE = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
_SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
_SCREAMING_SNAKE_CASE = embed_positions([2, 16, 768] )[None, None, :, :]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
A , A , A )
_SCREAMING_SNAKE_CASE = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
_SCREAMING_SNAKE_CASE = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , A , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , A , atol=self.tolerance )
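        # The expected tensors above encode the standard rotary (RoPE) mix used
        # by apply_rotary_position_embeddings: x' = x * cos_pos + rotate_half(x) * sin_pos,
        # where rotate_half pairs adjacent features (x0, x1) -> (-x1, x0), so
        # query/key dot products depend only on relative positions.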
| 58 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''git_vision_model'''
def __init__( self : int , lowerCamelCase_ : Dict=7_68 , lowerCamelCase_ : Tuple=30_72 , lowerCamelCase_ : List[Any]=12 , lowerCamelCase_ : Tuple=12 , lowerCamelCase_ : int=3 , lowerCamelCase_ : List[str]=2_24 , lowerCamelCase_ : Optional[Any]=16 , lowerCamelCase_ : Optional[Any]="quick_gelu" , lowerCamelCase_ : List[Any]=1e-5 , lowerCamelCase_ : List[Any]=0.0 , lowerCamelCase_ : Optional[Any]=0.02 , **lowerCamelCase_ : str , ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : int = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE : Optional[int] = image_size
SCREAMING_SNAKE_CASE : List[str] = initializer_range
SCREAMING_SNAKE_CASE : str = attention_dropout
SCREAMING_SNAKE_CASE : Any = layer_norm_eps
SCREAMING_SNAKE_CASE : List[str] = hidden_act
@classmethod
def lowerCamelCase_ ( cls : List[str] , lowerCamelCase_ : Union[str, os.PathLike] , **lowerCamelCase_ : int ):
'''simple docstring'''
cls._set_token_in_kwargs(lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = cls.get_config_dict(lowerCamelCase_ , **lowerCamelCase_ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
SCREAMING_SNAKE_CASE : Optional[Any] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowerCamelCase_ , **lowerCamelCase_ )
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''git'''
def __init__( self : List[str] , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Tuple=3_05_22 , lowerCamelCase_ : Optional[Any]=7_68 , lowerCamelCase_ : Any=6 , lowerCamelCase_ : List[str]=12 , lowerCamelCase_ : List[str]=30_72 , lowerCamelCase_ : Union[str, Any]="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Dict=0.1 , lowerCamelCase_ : Optional[int]=10_24 , lowerCamelCase_ : int=0.02 , lowerCamelCase_ : Optional[int]=1e-12 , lowerCamelCase_ : Union[str, Any]=0 , lowerCamelCase_ : Optional[Any]="absolute" , lowerCamelCase_ : Dict=True , lowerCamelCase_ : Optional[Any]=False , lowerCamelCase_ : Optional[int]=1_01 , lowerCamelCase_ : Optional[Any]=1_02 , lowerCamelCase_ : List[str]=None , **lowerCamelCase_ : str , ):
'''simple docstring'''
super().__init__(bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , pad_token_id=lowerCamelCase_ , **lowerCamelCase_ )
if vision_config is None:
SCREAMING_SNAKE_CASE : Any = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = GitVisionConfig(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Tuple = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : Dict = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE : Union[str, Any] = position_embedding_type
SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache
SCREAMING_SNAKE_CASE : List[str] = tie_word_embeddings
SCREAMING_SNAKE_CASE : int = num_image_with_embedding
SCREAMING_SNAKE_CASE : Optional[Any] = bos_token_id
SCREAMING_SNAKE_CASE : str = eos_token_id
    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
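    # Serialization note: to_dict() re-nests the vision tower via
    # self.vision_config.to_dict() and records model_type, so a saved GitConfig
    # round-trips through from_pretrained (including the vision_config branch
    # handled by GitVisionConfig.from_pretrained above).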
| 323 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
__lowerCamelCase = TypeVar("""T""")
__lowerCamelCase = TypeVar("""U""")
class UpperCAmelCase ( Generic[T, U] ):
def __init__(self : Dict , snake_case__ : T | None , snake_case__ : U | None ) -> List[str]:
'''simple docstring'''
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None
def __repr__(self : List[Any] ) -> str:
'''simple docstring'''
return (
f"""Node: key: {self.key}, val: {self.val}, """
f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class UpperCAmelCase ( Generic[T, U] ):
def __init__(self : List[Any] ) -> None:
'''simple docstring'''
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None , None )
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None , None )
        # wire the two sentinel nodes together
        self.head.next, self.rear.prev = self.rear, self.head
def __repr__(self : int ) -> str:
'''simple docstring'''
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node ) )
            node = node.next
        rep.append(str(self.rear ) )
        return ",\n ".join(rep )
    def add(self , node: DoubleLinkedListNode[T, U] ) -> None:
        '''simple docstring'''
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear
    def remove(self , node: DoubleLinkedListNode[T, U] ) -> DoubleLinkedListNode[T, U] | None:
        '''simple docstring'''
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.next = None
        node.prev = None
        return node
class UpperCAmelCase ( Generic[T, U] ):
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}
    def __init__(self , capacity: int ) -> None:
        '''simple docstring'''
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__(self : Optional[Any] ) -> str:
'''simple docstring'''
return (
f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
f"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__(self : Dict , snake_case__ : T ) -> bool:
'''simple docstring'''
return key in self.cache
    def get(self , key: T ) -> U | None:
        '''simple docstring'''
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key] )
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node )
            return node.val
        self.miss += 1
        return None
    def put(self , key: T , value: U ) -> None:
        '''simple docstring'''
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                # node guaranteed to be in list
                assert self.list.remove(first_node ) is not None
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key , value )
            self.list.add(self.cache[key] )
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key] )
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node )
@classmethod
    def decorator(cls , size: int = 1_28 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        '''simple docstring'''
        def cache_decorator_inner(func: Callable[[T], U] ) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T ) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size )
                result = cls.decorator_function_to_instance_map[func].get(args[0] )
                if result is None:
                    result = func(*args )
                    cls.decorator_function_to_instance_map[func].put(args[0] , result )
                return result
            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]
            setattr(cache_decorator_wrapper , "cache_info" , cache_info )  # noqa: B010
            return cache_decorator_wrapper
        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
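# Minimal usage sketch of LRUCache.decorator above (function name illustrative):
#
#     @LRUCache.decorator(100)
#     def fib(n: int) -> int:
#         return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#     fib(25)
#     print(fib.cache_info())  # reports hits, misses, capacity, current size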
| 59 |
"""Manim scene: visualizes inference with CPU offload — as the input reaches
each layer, that layer's weights are moved onto the GPU and back off again."""
from manim import *


class OffloadInference(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        # CPU block: two 6-slot columns of memory cells
        cpu_left_col_base = [mem.copy() for _ in range(6)]
        cpu_right_col_base = [mem.copy() for _ in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        # GPU block: a single 4-slot column
        gpu_base = [mem.copy() for _ in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        # The model: six layers laid out left to right
        model_base = [mem.copy() for _ in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        # Filled rectangles marking where each layer's weights currently live
        model_arr = []
        model_cpu_arr = []
        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)

            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)
        self.add(*model_arr, *model_cpu_arr)

        # Disk block: two 6-slot columns of smaller cells
        disk_left_col_base = [meta_mem.copy() for _ in range(6)]
        disk_right_col_base = [meta_mem.copy() for _ in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            "Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1))

        input_box = Square(0.3)
        input_box.set_fill(RED, opacity=1.0)
        input_box.set_stroke(width=0.0)
        input_box.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input_box))

        input_box.generate_target()
        input_box.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input_box))
        self.play(FadeOut(step_1))

        a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)

        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_2 = MarkupText(
            "As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
        self.play(
            Write(a),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input_box.generate_target()
            input_box.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a, run_time=0.5), MoveToTarget(input_box, run_time=0.5), FadeIn(a_c, run_time=0.5), lag_ratio=0.2
            )
            self.play(grp)

            # The current layer's weights go back to the CPU ...
            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                # ... while the next layer's weights are pulled onto the GPU.
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7

                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]),
                        MoveToTarget(model_cpu_arr[i + 1]),
                    )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7),
                        MoveToTarget(model_cpu_arr[i + 1], run_time=0.7),
                    )
            else:
                # Last layer: everything returns to the CPU.
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input_box.generate_target()
                input_box.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )
                self.play(MoveToTarget(model_cpu_arr[i]))

        a = a_c
        a_c = a_c.copy()

        input_box.generate_target()
        input_box.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(step_2),
            FadeOut(a, run_time=0.5),
        )

        step_3 = MarkupText("Inference on a model too large for GPU memory\nis successfully completed.", font_size=24)
        step_3.move_to([2, 2, 0])
        self.play(Write(step_3, run_time=3), MoveToTarget(input_box))
        self.wait()
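Rendering note: with the Manim Community CLI installed, a quick preview of the scene above can be produced with manim -pql big_model_inference.py OffloadInference (the file name is assumed; the class name matches the restored code, and -pql renders at low quality and opens the result when done).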
| 323 | 0 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class snake_case_( a__ ):
__UpperCamelCase = '''Wav2Vec2FeatureExtractor'''
__UpperCamelCase = '''AutoTokenizer'''
def __init__( self : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : int ):
super().__init__(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : List[str] = self.feature_extractor
lowerCAmelCase : List[str] = False
@classmethod
def lowerCamelCase__ ( cls : Tuple , UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : List[str] ):
try:
return super().from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
except OSError:
warnings.warn(
F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
''' include a `tokenizer_class` attribute is deprecated and will be '''
'''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
''' attribute to either your `config.json` or `tokenizer_config.json` '''
'''file to suppress this warning: ''' , UpperCamelCase_ , )
lowerCAmelCase : int = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = WavaVecaCTCTokenizer.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
return cls(feature_extractor=UpperCamelCase_ , tokenizer=UpperCamelCase_ )
def __call__( self : Any , *UpperCamelCase_ : int , **UpperCamelCase_ : Union[str, Any] ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*UpperCamelCase_ , **UpperCamelCase_ )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
lowerCAmelCase : Tuple = kwargs.pop('''raw_speech''' )
else:
lowerCAmelCase : Tuple = kwargs.pop('''audio''' , UpperCamelCase_ )
lowerCAmelCase : List[str] = kwargs.pop('''sampling_rate''' , UpperCamelCase_ )
lowerCAmelCase : int = kwargs.pop('''text''' , UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0:
lowerCAmelCase : int = args[0]
lowerCAmelCase : int = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
lowerCAmelCase : Optional[int] = self.feature_extractor(UpperCamelCase_ , *UpperCamelCase_ , sampling_rate=UpperCamelCase_ , **UpperCamelCase_ )
if text is not None:
lowerCAmelCase : Optional[int] = self.tokenizer(UpperCamelCase_ , **UpperCamelCase_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCAmelCase : List[Any] = encodings['''input_ids''']
return inputs
def lowerCamelCase__ ( self : str , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Tuple ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*UpperCamelCase_ , **UpperCamelCase_ )
lowerCAmelCase : Tuple = kwargs.pop('''input_features''' , UpperCamelCase_ )
lowerCAmelCase : Optional[int] = kwargs.pop('''labels''' , UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0:
lowerCAmelCase : Union[str, Any] = args[0]
lowerCAmelCase : Optional[int] = args[1:]
if input_features is not None:
lowerCAmelCase : Tuple = self.feature_extractor.pad(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ )
if labels is not None:
lowerCAmelCase : Optional[Any] = self.tokenizer.pad(UpperCamelCase_ , **UpperCamelCase_ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
lowerCAmelCase : int = labels['''input_ids''']
return input_features
def lowerCamelCase__ ( self : Union[str, Any] , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Any ):
return self.tokenizer.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Union[str, Any] ):
return self.tokenizer.decode(*UpperCamelCase_ , **UpperCamelCase_ )
@contextmanager
def lowerCamelCase__ ( self : Optional[int] ):
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
lowerCAmelCase : Union[str, Any] = True
lowerCAmelCase : List[str] = self.tokenizer
yield
lowerCAmelCase : Optional[Any] = self.feature_extractor
lowerCAmelCase : Union[str, Any] = False
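A minimal usage sketch of the processor above — a sketch only: the checkpoint name is one public Wav2Vec2 CTC model, and the audio is a silent stand-in array:

import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")

speech = np.zeros(16_000, dtype=np.float32)  # stand-in for 1 s of 16 kHz audio
inputs = processor(audio=speech, sampling_rate=16_000, return_tensors="pt")

# Passing `text` in the same call also tokenizes CTC labels:
batch = processor(audio=speech, sampling_rate=16_000, text="HELLO WORLD", return_tensors="pt")
print(batch.keys())  # e.g. input_values and labels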
| 60 |
from __future__ import annotations

demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        """Explore the graph outward from the source, recording each vertex's parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Walk parent pointers back to the source and format the path."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            raise ValueError(
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(demo_graph, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))  # raises ValueError: no path exists
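One design note on the implementation above: queue.pop(0) on a Python list shifts every remaining element, making the traversal quadratic in the vertex count in the worst case. A collections.deque keeps BFS at the usual O(V + E); a drop-in variant of the method (a sketch, not part of the original):

from collections import deque

def breadth_first_search(self) -> None:
    visited = {self.source_vertex}
    self.parent[self.source_vertex] = None
    queue = deque([self.source_vertex])
    while queue:
        vertex = queue.popleft()  # O(1), unlike list.pop(0)
        for adjacent_vertex in self.graph[vertex]:
            if adjacent_vertex not in visited:
                visited.add(adjacent_vertex)
                self.parent[adjacent_vertex] = vertex
                queue.append(adjacent_vertex)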
| 323 | 0 |