from typing import List, Optional, Union

import numpy as np
import PIL
import torch
from PIL import Image

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
        >>> from diffusers.utils import load_image
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior.to("cuda")

        >>> prompt = "A red cartoon frog, 4k"
        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)

        >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        ... )
        >>> pipe.to("cuda")

        >>> init_image = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/frog.png"
        ... )

        >>> image = pipe(
        ...     image=init_image,
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=100,
        ...     strength=0.2,
        ... ).images

        >>> image[0].save("red_frog.png")
        ```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    """Compute the latent grid size for a requested image size."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
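# Worked example with the default scale_factor=8 (the MoVQ downsampling factor):
# a 768x768 request gives 768 // 8**2 == 12 per side, so the function returns
# (96, 96), the latent grid that the VQ decoder upsamples back to 768x768.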
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
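# prepare_image returns a 1x3xHxW float32 tensor scaled to [-1, 1], the value
# range the MoVQ encoder expects.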
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
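    # Worked example: with num_inference_steps=100 and strength=0.3, init_timestep
    # is min(30, 100) == 30 and t_start is 70, so only the last 30 scheduler
    # timesteps are run; higher strength applies more noise and more denoising.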
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # noise the clean latents up to the starting timestep
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
        help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
            ' Base. Use 768 for Stable Diffusion v2.'
        ),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    """Abstract base class for all constraints that can be applied during generation."""

    def __init__(self):
        # test for correctness of the subclass implementation
        self.test()

    def test(self):
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")
        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    """A constraint enforcing that an ordered sequence of tokens appears in the output."""

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint
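# Protocol sketch for PhrasalConstraint (token ids here are hypothetical):
#   c = PhrasalConstraint([5, 9, 2])
#   c.update(5)  # -> (True, False, False): stepped, not completed, no reset
#   c.update(9)  # -> (True, False, False)
#   c.update(2)  # -> (True, True, False): the whole phrase is now fulfilled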
class DisjunctiveTrie:
    """A trie over several token sequences, used to track which next tokens keep any branch alive."""

    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
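# For example, DisjunctiveTrie([[1, 2, 3], [1, 2, 4]]).next_tokens([1, 2]) == [3, 4]:
# both branches share the prefix [1, 2] and diverge only on the final token.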
class DisjunctiveConstraint(Constraint):
    """A constraint that is fulfilled by completing any one of several token sequences."""

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        next_tokens = self.trie.next_tokens(self.current_seq)

        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint
class ConstraintListState:
    """Tracks a beam's progress through a list of constraints during constrained generation."""

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add
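    # Worked example: with max_seqlen == 3, one completed constraint and an
    # in-progress constraint with 1 token remaining score 3 + (3 - 1) == 5;
    # beam search uses this bank value to group beams by constraint progress.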
    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #    inprogress to None. If there are no pending constraints either, then this full list of constraints
                #    is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True

        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we actually never touch self.constraints objects
        # throughout this process. So it's at initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get('DIFFUSERS_IS_CI', '').upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)['name']
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            'Modelcard rendering is based on Jinja templates.'
            ' Please make sure to have `jinja` installed before using `create_model_card`.'
            ' To install it, please run `pip install Jinja2`.'
        )

    if hasattr(args, 'local_rank') and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, 'hub_token') else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language='en',
            license='apache-2.0',
            library_name='diffusers',
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, 'dataset_name') else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, 'gradient_accumulation_steps') else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, 'adam_beta1') else None,
        adam_beta2=args.adam_beta2 if hasattr(args, 'adam_beta2') else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, 'adam_weight_decay') else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, 'adam_epsilon') else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, 'lr_scheduler') else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, 'lr_warmup_steps') else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, 'ema_inv_gamma') else None,
        ema_power=args.ema_power if hasattr(args, 'ema_power') else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, 'ema_max_decay') else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, 'README.md')
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """Extracts the commit hash from a resolved filename toward a cache file."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r'snapshots/([^/]+)/', resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
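# Example with a hypothetical cache path: a resolved file such as
# "~/.cache/huggingface/diffusers/models--x/snapshots/<40-char-hash>/unet/config.json"
# yields the "<40-char-hash>" component, returned only if it matches REGEX_COMMIT_HASH.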
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None):
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob('**/blobs/*'):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.'
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'the directory exists and can be written to.'
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split('.')
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '.'.join(splits)

    return weights_name
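# e.g. _add_variant("diffusion_pytorch_model.bin", "fp16") returns
# "diffusion_pytorch_model.fp16.bin": the variant is spliced in before the file extension.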
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse('0.20.0')
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f'{self.__class__.__name__}({self.name}, {self.val})'

    def __lt__(self, other):
        return self.val < other.val
class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify step
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[smallest], array[idx] = array[idx], array[smallest]
                (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                ) = (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node('R', -1)
b = Node('B', 6)
a = Node('A', 3)
x = Node('X', 1)
e = Node('E', 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
    doctest.testmod()
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('levit_128s', pretrained=True)
            else:
                from_model = timm.create_model('levit_128', pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model('levit_192', pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model('levit_256', pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model('levit_384', pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # the two state dicts line up one-to-one, so copy weights by position
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        'levit-128S': 128,
        'levit-128': 128,
        'levit-192': 192,
        'levit-256': 256,
        'levit-384': 384,
    }

    names_to_config = {
        'levit-128S': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        'levit-128': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        'levit-192': ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        'levit-256': ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        'levit-384': ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies
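# Small sanity check: total_frequency_distribution(sides_number=3, dice_number=2)
# tabulates all 9 ordered rolls; a total of 4 occurs 3 times ((1,3), (2,2), (3,1)).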
def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
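# This is Project Euler problem 205: Peter rolls nine 4-sided dice, Colin rolls
# six 6-sided dice, and the rounded probability that Peter wins is 0.5731441.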
if __name__ == "__main__":
print(F'''{solution() = }''')
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, 'r') as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
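# Assuming a label file with one class name as the first token of each line
# (e.g. "down\nup\nleft"), this returns {0: "down", 1: "up", 2: "left"}, which is
# used below as the id2label mapping for the sequence-classification head.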
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split('.')[-1]]
            weight_type = 'param'

    if weight_type is not None and weight_type != 'param':
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == 'param':
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split('.'):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == 'weight':
        hf_pointer.weight.data = value
    elif weight_type == 'weight_g':
        hf_pointer.weight_g.data = value
    elif weight_type == 'weight_v':
        hf_pointer.weight_v.data = value
    elif weight_type == 'bias':
        hf_pointer.bias.data = value
    elif weight_type == 'param':
        for attribute in hf_param_name.split('.'):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split('.')[-1]]
            weight_type = 'param'

    if weight_type is not None and weight_type != 'param':
        full_key = '.'.join([key, weight_type])
    elif weight_type is not None and weight_type == 'param':
        full_key = '.'.join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if 'lm_head' in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split('.')[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if 'weight_g' in name:
                weight_type = 'weight_g'
            elif 'weight_v' in name:
                weight_type = 'weight_v'
            elif 'bias' in name:
                weight_type = 'bias'
            elif 'weight' in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = 'weight'
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if 'conv_layers' in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == 'group',
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if 'bias' in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif 'weight' in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if 'bias' in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif 'weight' in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict['<pad>'] = 0
            vocab_dict['<s>'] = 1
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token='|',
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task='audio_pretraining')
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest ( TokenizerTesterMixin , unittest.TestCase):

    '''simple docstring'''
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"[UNK]",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_token_type_ids( self ):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello" , "World" )
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"] , expected_token_type_ids )
@slow
    def test_sequence_builders( self ):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base" )
        text = tokenizer.encode("sequence builders" , add_special_tokens=False )
        text_a = tokenizer.encode("multi-sequence build" , add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            "sequence builders" , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders" , "multi-sequence build" , add_special_tokens=True , add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration( self ):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class )
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base" )
            sequences = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
            encoding = tokenizer(sequences , padding=True )
            decoded_sequences = [tokenizer.decode(seq , skip_special_tokens=True ) for seq in encoding["input_ids"]]
# fmt: off
            expected_encoding = {
"input_ids": [
[1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 3_5, 8_3, 2_5_1_9_1, 1_6_3, 1_8_8_5_4, 1_3, 1_2_1_5_6, 1_2, 1_6_1_0_1, 2_5_3_7_6, 1_3_8_0_7, 9, 2_2_2_0_5, 2_7_8_9_3, 1_6_3_5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 2_4_5_3_6, 8_0, 4_3_7_9_7, 4_8_7_8, 7_3_7_3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_3_3, 7_8, 6_5, 1_6, 1_0, 3_7_2_4, 1_5_3_8, 3_3_1_8_3, 1_1_3_0_3, 4_3_7_9_7, 1_9_3_8, 4, 8_7_0, 2_4_1_6_5, 2_9_1_0_5, 5, 7_3_9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 3_6_1_7_3, 8_8, 8_0, 6_5_0, 7_8_2_1, 4_5_9_4_0, 6, 5_2, 2_5_5_9, 5, 1_8_3_6, 9, 5, 7_3_9_7, 1_3_1_7_1, 3_1, 5, 1_8_3_6, 9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 4, 2]
],
"token_type_ids": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
            self.assertDictEqual(encoding.data , expected_encoding )
            for expected, decoded in zip(expected_decoded_sequence , decoded_sequences ):
                self.assertEqual(expected , decoded )
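# Illustrative sketch (not part of the test class above) of how a merges list
# like the one written in setUp drives byte-level BPE: repeatedly apply the
# highest-priority merge between adjacent symbols until none applies.
def bpe_merge(word, merges):
    # rank each merge pair by its position in the merges file (lower = earlier)
    ranks = {tuple(m.split()): i for i, m in enumerate(merges) if m and not m.startswith("#")}
    symbols = list(word)
    while len(symbols) > 1:
        best_rank, best_i = min(
            (ranks.get(pair, float("inf")), i)
            for i, pair in enumerate(zip(symbols, symbols[1:]))
        )
        if best_rank == float("inf"):
            break  # no listed merge applies anymore
        symbols[best_i : best_i + 2] = [symbols[best_i] + symbols[best_i + 1]]
    return symbols

assert bpe_merge("lower", ["e r"]) == ["l", "o", "w", "er"]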
| 372 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MTaIntegrationTest (unittest.TestCase ):
    @slow
    def test_small_integration_test( self ):
        model = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')
        input_ids = tokenizer('Hello there' , return_tensors='pt').input_ids
        labels = tokenizer('Hi I am' , return_tensors='pt').input_ids
        loss = model(input_ids.to(torch_device) , labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.91_27
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
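# Note on the check above: `loss` is the mean cross-entropy per target token, so
# `labels.shape[-1] * loss.item()` is the total negative log-likelihood of the
# label sequence; negating it gives a log-likelihood that is compared against
# the reference score from the original mesh-tensorflow (mtf) implementation.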
| 20 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_roberta_fast'] = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roberta'] = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roberta'] = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roberta'] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
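# What the _LazyModule indirection above buys us (illustrative, assumes a torch
# install): importing the package and its configs stays cheap, while heavy
# backend modules are only imported on first attribute access.
#
#   from transformers.models.roberta import RobertaConfig  # no torch import yet
#   from transformers.models.roberta import RobertaModel   # triggers the torch import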
| 533 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest (unittest.TestCase ):
    def setUp( self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel')
        processor = BlipProcessor(image_processor , tokenizer)
        processor.save_pretrained(self.tmpdirname)
    def get_tokenizer( self , **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs).tokenizer
    def get_image_processor( self , **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs).image_processor
    def tearDown( self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs( self):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features( self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0)
        processor = BlipProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor , BlipImageProcessor)
    def test_image_processor( self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors='np')
        input_processor = processor(images=image_input , return_tensors='np')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
    def test_tokenizer( self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str , return_token_type_ids=False)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key])
    def test_processor( self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input)
        self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode( self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok , decoded_processor)
    def test_model_input_names( self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input)
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
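# Sketch of the round trip exercised by the tests above (illustrative): a
# processor bundles an image processor and a tokenizer, so one call yields all
# model inputs at once.
#
#   processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
#   inputs = processor(text='lower newer', images=image_input)
#   # -> dict with 'pixel_values', 'input_ids' and 'attention_mask'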
| 20 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : str = logging.get_logger(__name__)
def create_rename_keys(config , base_model=False ):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict , config , base_model=False ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict ):
    """simple docstring"""
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    """simple docstring"""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name , pytorch_dump_folder_path ):
    """simple docstring"""
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10] )
        config.image_size = int(vit_name[-9:-6] )
    else:
        config.num_labels = 1_000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
        config.patch_size = int(vit_name[-6:-4] )
        config.image_size = int(vit_name[-3:] )
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith('tiny' ):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith('small' ):
            config.hidden_size = 384
            config.intermediate_size = 1_536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith('small' ):
            config.hidden_size = 768
            config.intermediate_size = 2_304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith('base' ):
            pass
        elif vit_name[4:].startswith('large' ):
            config.hidden_size = 1_024
            config.intermediate_size = 4_096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith('huge' ):
            config.hidden_size = 1_280
            config.intermediate_size = 5_120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size )
    else:
        image_processor = ViTImageProcessor(size=config.image_size )
    encoding = image_processor(images=prepare_img() , return_tensors='pt' )
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1e-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
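# Minimal illustration of the qkv split performed in read_in_q_k_v above: timm
# fuses query/key/value into one (3*hidden, hidden) projection, which the
# converter slices back into three (hidden, hidden) blocks.
#
#   qkv = torch.randn(3 * hidden, hidden)
#   q, k, v = qkv[:hidden], qkv[hidden : 2 * hidden], qkv[-hidden:]
#   assert torch.equal(torch.cat([q, k, v]), qkv)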
| 430 |
def exchange_sort(numbers: list[int] ) -> list[int]:
    n = len(numbers )
    for i in range(n ):
        for j in range(i + 1 , n ):
            if numbers[j] < numbers[i]:
                numbers[i] , numbers[j] = numbers[j], numbers[i]
    return numbers
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
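# Complexity note: exchange sort always performs n*(n-1)/2 comparisons, i.e.
# O(n^2) time with O(1) extra space, and it sorts the list in place.
#
#   >>> exchange_sort([5, 4, 3, 2, 1])
#   [1, 2, 3, 4, 5]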
| 20 | 0 |
'''simple docstring'''
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
SESSION_ID = uuida().hex
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def http_user_agent(user_agent = None ):
    ua = f"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
if is_torch_available():
ua += f"""; torch/{_torch_version}"""
if is_flax_available():
ua += f"""; jax/{_jax_version}"""
ua += f"""; flax/{_flax_version}"""
if is_onnx_available():
ua += f"""; onnxruntime/{_onnxruntime_version}"""
# CI will set this value to True
if os.environ.get("""DIFFUSERS_IS_CI""" , """""" ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join(f"""{k}/{v}""" for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
ua += "; " + user_agent
return ua
def get_full_repo_name(model_id , organization = None , token = None ):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token )["""name"""]
return f"""{username}/{model_id}"""
else:
return f"""{organization}/{model_id}"""
def create_model_card(args , model_name ):
    if not is_jinja_available():
        raise ValueError(
            """Modelcard rendering is based on Jinja templates."""
            """ Please make sure to have `jinja` installed before using `create_model_card`."""
            """ To install it, please run `pip install Jinja2`.""" )
    if hasattr(args , """local_rank""" ) and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args , """hub_token""" ) else None
    repo_name = get_full_repo_name(model_name , token=hub_token )
    model_card = ModelCard.from_template(
        card_data=ModelCardData( # Card metadata object that will be converted to YAML block
            language="""en""" , license="""apache-2.0""" , library_name="""diffusers""" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=MODEL_CARD_TEMPLATE_PATH , model_name=model_name , repo_name=repo_name , dataset_name=args.dataset_name if hasattr(args , """dataset_name""" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args , """gradient_accumulation_steps""" ) else None
        ) , adam_beta1=args.adam_beta1 if hasattr(args , """adam_beta1""" ) else None , adam_beta2=args.adam_beta2 if hasattr(args , """adam_beta2""" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(args , """adam_weight_decay""" ) else None , adam_epsilon=args.adam_epsilon if hasattr(args , """adam_epsilon""" ) else None , lr_scheduler=args.lr_scheduler if hasattr(args , """lr_scheduler""" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(args , """lr_warmup_steps""" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(args , """ema_inv_gamma""" ) else None , ema_power=args.ema_power if hasattr(args , """ema_power""" ) else None , ema_max_decay=args.ema_max_decay if hasattr(args , """ema_max_decay""" ) else None , mixed_precision=args.mixed_precision , )
    card_path = os.path.join(args.output_dir , """README.md""" )
    model_card.save(card_path )
def extract_commit_hash(resolved_file , commit_hash = None ):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file ).as_posix() )
    search = re.search(R"""snapshots/([^/]+)/""" , resolved_file )
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache(old_cache_dir = None , new_cache_dir = None ):
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir ).expanduser()
    new_cache_dir = Path(new_cache_dir ).expanduser()
    for old_blob_path in old_cache_dir.glob("""**/blobs/*""" ):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir )
            new_blob_path.parent.mkdir(parents=True , exist_ok=True )
            os.replace(old_blob_path , new_blob_path )
            try:
                os.symlink(new_blob_path , old_blob_path )
            except OSError:
logger.warning(
"""Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.""" )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
'the directory exists and can be written to.'
)
def _add_variant(weights_name , variant = None ):
    if variant is not None:
        splits = weights_name.split(""".""" )
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = """.""".join(splits )
    return weights_name
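# e.g. _add_variant('diffusion_pytorch_model.bin', 'fp16')
#      -> 'diffusion_pytorch_model.fp16.bin'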
def _get_model_file( pretrained_model_name_or_path , *,
    weights_name , subfolder , cache_dir , force_download , proxies , resume_download , local_files_only , use_auth_token , user_agent , revision , commit_hash=None , ):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    if os.path.isfile(pretrained_model_name_or_path ):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path ):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path , weights_name ) ):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path , weights_name )
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path , subfolder , weights_name ) ):
            model_file = os.path.join(pretrained_model_name_or_path , subfolder , weights_name )
return model_file
else:
raise EnvironmentError(
f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__ ).base_version ) >= version.parse("""0.20.0""" )
):
try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path , filename=_add_variant(weights_name , revision ) , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
                warnings.warn(
                    f"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , FutureWarning , )
return model_file
except: # noqa: E722
                warnings.warn(
                    f"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name , revision )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name , revision )}' so that the correct variant file can be added.""" , FutureWarning , )
try:
# 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path , filename=weights_name , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
"""listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a """
"""token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """
"""login`.""" )
except RevisionNotFoundError:
raise EnvironmentError(
f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
"""this model name. Check the model page at """
f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
f""" directory containing a file named {weights_name} or"""
""" \nCheckout your internet connection or see how to run the library in"""
""" offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.""" )
except EnvironmentError:
raise EnvironmentError(
f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
"""\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. """
f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
f"""containing a file named {weights_name}""" ) | 585 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__( self , parent , out_indices=None , stage_names=None , out_features=None , backbone="resnet50" , batch_size=3 , image_size=32 , num_channels=3 , use_pretrained_backbone=True , is_training=True , ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs( self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config( self):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
    def create_and_check_model( self , config , pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
    def prepare_config_and_inputs_for_common( self):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest (ModelTesterMixin , BackboneTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp( self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self , config_class=TimmBackboneConfig , has_text_modality=False)
    def test_config( self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence( self):
        timm_checkpoint = 'resnet18'
        transformers_checkpoint = 'microsoft/resnet-18'
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names))
self.assertEqual(timm_model.channels , transformers_model.channels)
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,))
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1])
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True , out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint , out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(timm_model.channels , transformers_model.channels)
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> Dict:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.')
def __UpperCamelCase ( self) -> str:
pass
@unittest.skip('Safetensors is not supported by timm.')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self) -> Optional[Any]:
pass
    def test_forward_signature( self):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
    def test_retain_grad_hidden_states_attentions( self):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        inputs = self._prepare_for_class(inputs_dict , model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(hidden_states.grad)
if self.has_attentions:
self.assertIsNotNone(attentions.grad)
    def test_backbone_outputs( self):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps) , len(config.out_indices))
            self.assertEqual(len(model.channels) , len(config.out_indices))
            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps) , 1)
            self.assertEqual(len(model.channels) , 1)
            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
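# Sketch of the backbone API exercised by this test class (illustrative, needs
# timm and network access):
#
#   backbone = AutoBackbone.from_pretrained('resnet18', use_timm_backbone=True, out_indices=[1, 2, 3])
#   feature_maps = backbone(pixel_values).feature_maps  # one feature map per requested stage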
| 20 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , block_sizes=[1, 1, 2] , num_decoder_layers=1 , d_model=32 , n_head=4 , d_head=8 , d_inner=37 , hidden_act="gelu_new" , hidden_dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , max_position_embeddings=512 , type_vocab_size=3 , initializer_std=0.02 , num_labels=3 , num_choices=4 , scope=None , base=False , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std
        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        model = TFFunnelModel(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        config.truncate_seq = False
        model = TFFunnelModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        config.separate_cls = False
        model = TFFunnelModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
    def create_and_check_base_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        model = TFFunnelBaseModel(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
        config.separate_cls = False
        model = TFFunnelBaseModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
    def create_and_check_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        model = TFFunnelForPreTraining(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        model = TFFunnelForMaskedLM(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        model = TFFunnelForQuestionAnswering(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class TFFunnelModelTest (TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": (TFFunnelBaseModel, TFFunnelModel),
"""fill-mask""": TFFunnelForMaskedLM,
"""question-answering""": TFFunnelForQuestionAnswering,
"""text-classification""": TFFunnelForSequenceClassification,
"""token-classification""": TFFunnelForTokenClassification,
"""zero-shot""": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFFunnelModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FunnelConfig )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@require_tf
class TFFunnelBaseModelTest (TFModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFFunnelModelTester(self , base=True )
        self.config_tester = ConfigTester(self , config_class=FunnelConfig )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_base_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
| 264 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowerCAmelCase: Optional[Any] = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase: List[str] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
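# Usage sketch (illustrative): with the _LazyModule indirection above, a plain
# ``from transformers import SwiftFormerModel`` only imports
# ``modeling_swiftformer`` (and therefore torch) on first attribute access,
# keeping ``import transformers`` cheap when these classes are never used.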
| 20 | 0 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config, "use_cache"):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)
                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]
                self.assertEqual(len(outputs), num_out)
                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size]
                )
                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
@slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 588 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase: str = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = 'big_bird'

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a__ ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
a__ ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
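# Minimal usage sketch (assumes the public transformers API; the
# hyper-parameters shown are illustrative, not recommendations):
#
#   from transformers import BigBirdConfig, BigBirdModel
#
#   config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
#   model = BigBirdModel(config)  # randomly initialised, no checkpoint download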
| 20 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
# fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
    def test_added_tokens_do_lower_case(self):
        pass
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)
                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_encoded)

                tokens_decoded = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_decoded), 0)

                text_decoded = tokenizer.decode(ids)
                self.assertIsInstance(text_decoded, str)
                self.assertEqual(text_decoded.replace(" ", ""), output_text)
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
    def test_maximum_encoding_length_pair_input(self):
        pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
    def test_pretokenized_inputs(self):
        pass
| 65 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger(__name__)
_lowerCAmelCase: Tuple = torch.device('cpu')
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('.pwconv', '.point_wise_conv')
        if ".dwconv" in k:
            k_new = k_new.replace('.dwconv', '.depth_wise_conv')
        if ".Proj." in k:
            k_new = k_new.replace('.Proj.', '.proj.')
        if "patch_embed" in k_new:
            k_new = k_new.replace('patch_embed', 'swiftformer.patch_embed.patch_embedding')
        if "network" in k_new:
            ls = k_new.split('.')
            if ls[2].isdigit():
                k_new = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:])
            else:
                k_new = k_new.replace('network', 'swiftformer.encoder.network')
        rename_keys.append((k, k_new))
return rename_keys
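# Illustrative mapping produced by create_rename_keys (hypothetical key):
#   "network.0.0.dwconv.weight"
#   -> "swiftformer.encoder.network.0.blocks.0.depth_wise_conv.weight"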
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location='cpu')
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained('preprocessor_config')
    inputs = processor(images=image, return_tensors='pt')

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs['pixel_values']).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowerCAmelCase: Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
_lowerCAmelCase: Optional[int] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
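# Example invocation (illustrative paths):
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt ./swiftformer_xs.pth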
| 20 | 0 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
_snake_case = flax_key_tuple[:-1] + ("weight",)
_snake_case = torch.permute(__a , (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
# linear layer
_snake_case = flax_key_tuple[:-1] + ("weight",)
_snake_case = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_snake_case = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
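# Illustrative example (assumed expert-layer shapes, for clarity only): a flax
# key ("mlp", "wi", "kernel") holding a 3-D tensor of shape
# (num_experts, d_model, d_ff) becomes ("mlp", "wi", "weight") with the tensor
# permuted to (num_experts, d_ff, d_model) to match the PyTorch layout.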
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if "metadata" in layer:
_snake_case = layer.split("metadata" )
_snake_case = "".join(split_layer[0] )[:-1]
_snake_case = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
_snake_case = layer.split("kvstore" )
_snake_case = "".join(split_layer[0] )[:-1]
_snake_case = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
_snake_case = layer.split("/" )
_snake_case = "/".join(split_layer[:-1] )
_snake_case = (split_layer[-1],)
if "kvstore/path" in layer:
_snake_case = f'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
elif "kvstore/driver" in layer:
_snake_case = "file"
else:
_snake_case = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = rename_keys(__a )
_snake_case = {}
for k, v in current_block.items():
_snake_case = v
_snake_case = new_current_block
torch.save(__a , __a )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = WEIGHTS_NAME ):
'''simple docstring'''
_snake_case = convert_file_size_to_int(__a )
_snake_case = []
_snake_case = {}
_snake_case = 0
_snake_case = 0
os.makedirs(__a , exist_ok=__a )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
_snake_case = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
_snake_case = flatten_dict(__a , sep="/" )
_snake_case = {}
for layer in checkpoint_info.keys():
_snake_case , _snake_case , _snake_case = get_key_and_tensorstore_dict(
__a , __a , __a )
if curr_real_layer_name in all_layers:
_snake_case = content
else:
_snake_case = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_snake_case = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_snake_case = torch.tensor(__a )
_snake_case = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_snake_case , _snake_case = rename_base_flax_keys(tuple(key.split("/" ) ) , __a )
_snake_case = "/".join(__a )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_snake_case = os.path.join(
__a , weights_name.replace(".bin" , f'''-{len(__a )+1:05d}-of-???.bin''' ) )
rename_and_save_block(__a , __a )
sharded_state_dicts.append(current_block.keys() )
del current_block
_snake_case = {}
_snake_case = 0
_snake_case = raw_weights.to(getattr(__a , __a ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_snake_case = os.path.join(__a , weights_name.replace(".bin" , f'''-{len(__a )+1:05d}-of-???.bin''' ) )
rename_and_save_block(__a , __a )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(__a ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_snake_case = {}
_snake_case = {}
for idx, shard in enumerate(__a ):
_snake_case = weights_name.replace(
".bin" , f'''-{idx+1:05d}-of-{len(__a ):05d}.bin''' ) # len(sharded_state_dicts):05d}
_snake_case = os.path.join(__a , weights_name.replace(".bin" , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(__a , os.path.join(__a , __a ) )
_snake_case = shard
for key in shard:
_snake_case = shard_file
# Add the metadata
_snake_case = {"total_size": total_size}
_snake_case = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(__a , __a ) , "w" , encoding="utf-8" ) as f:
_snake_case = json.dumps(__a , indent=2 , sort_keys=__a ) + "\n"
f.write(__a )
return metadata, index
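# Back-of-the-envelope sizing (assuming convert_file_size_to_int parses "GB"
# as 10**9 bytes): a float32 tensor with 1_000_000 elements adds 4_000_000
# bytes, so max_shard_size="10GB" holds roughly 2_500 such tensors before a
# new shard file is started.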
if __name__ == "__main__":
__magic_name__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
__magic_name__ : Union[str, Any] = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 672 |
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"""The total weight of the minimal spanning tree is: {mst_weight}""")
def _lowercase( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
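# Usage sketch (illustrative):
#   g = Graph(4)
#   for u, v, w in ((0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4)):
#       g.add_edge(u, v, w)
#   g.boruvka()  # prints each added edge; the total MST weight here is 19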
| 20 | 0 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
__lowercase = logging.getLogger(__name__)
@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = 'train'
    dev = 'dev'
    test = 'test'


class TokenClassificationTask:
@staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info('''Writing example %d of %d''', ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
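            # Worked illustration (hypothetical word pieces, for clarity only):
            #   words  = ["Hugging", "Face"], labels = ["B-ORG", "I-ORG"]
            #   tokens = ["Hu", "##gging", "Face"] -> label_ids keeps the real
            #   label only on each word's first sub-token:
            #   [label_map["B-ORG"], pad_token_label_id, label_map["I-ORG"]]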
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info('''*** Example ***''')
                logger.info('''guid: %s''', example.guid)
                logger.info('''tokens: %s''', ''' '''.join([str(x) for x in tokens]))
                logger.info('''input_ids: %s''', ''' '''.join([str(x) for x in input_ids]))
                logger.info('''input_mask: %s''', ''' '''.join([str(x) for x in input_mask]))
                logger.info('''segment_ids: %s''', ''' '''.join([str(x) for x in segment_ids]))
                logger.info('''label_ids: %s''', ''' '''.join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

        def __init__(self, token_classification_task, data_dir, tokenizer, labels, model_type, max_seq_length=None, overwrite_cache=False, mode=Split.train):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, '''cached_{}_{}_{}'''.format(mode.value, tokenizer.__class__.__name__, str(max_seq_length))
            )
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + '''.lock'''
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(F"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(F"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ['''xlnet''']), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == '''left'''), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(F"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)
def __len__( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return len(self.features )
def __getitem__( self : List[Any] , lowerCamelCase__ : str ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100

        def __init__(self, token_classification_task, data_dir, tokenizer, labels, model_type, max_seq_length=None, overwrite_cache=False, mode=Split.train):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ['''xlnet''']), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == '''left'''), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id,
            )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({'''input_ids''': tf.int32, '''attention_mask''': tf.int32}, tf.int64),
                    (
                        {'''input_ids''': tf.TensorShape([None]), '''attention_mask''': tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({'''input_ids''': tf.int32, '''attention_mask''': tf.int32, '''token_type_ids''': tf.int32}, tf.int64),
                    (
                        {
                            '''input_ids''': tf.TensorShape([None]),
                            '''attention_mask''': tf.TensorShape([None]),
                            '''token_type_ids''': tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )
        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset
def __len__( self : Optional[int] ) -> Any:
"""simple docstring"""
return len(self.features )
def __getitem__( self : Union[str, Any] , lowerCamelCase__ : List[Any] ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
| 203 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowerCAmelCase: Union[str, Any] = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
_lowerCAmelCase: Dict = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_lowerCAmelCase: List[Any] = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
| 20 | 0 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Return True if `number` is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions and reduce the sum to lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
top //= hcf
bottom //= hcf
return top, bottom
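# Worked example (illustrative): 1/2 + 1/3 + 1/6 = 36/36, and gcd(36, 36) = 36
# reduces the pair to lowest terms:
#   add_three(1, 2, 1, 3, 1, 6) == (1, 1)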
def solution(order: int = 35) -> int:
    unique_s: set = set()
    total = Fraction(0)
    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)
    for num, den in unique_s:
        total += Fraction(num, den)
    return total.denominator + total.numerator
if __name__ == "__main__":
print(F'''{solution() = }''') | 433 |
from __future__ import annotations
END = '#'


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [' '] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie('de'))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 20 | 0 |
def max_product(numbers) -> int:
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("""numbers must be an iterable of integers""")
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
return max_prod
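# Worked examples (illustrative):
#   max_product([2, 3, -2, 4]) -> 6   (best subarray is [2, 3])
#   max_product([-2, 0, -1])   -> 0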
| 336 |
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'


def main():
    message = input('Enter message: ')
    key = input('Enter key [alphanumeric]: ')
    mode = input('Encrypt/Decrypt [e/d]: ')
    if mode.lower().startswith('e'):
        mode = 'encrypt'
        translated = encrypt_message(key, message)
    elif mode.lower().startswith('d'):
        mode = 'decrypt'
        translated = decrypt_message(key, message)
    print(f"""\n{mode.title()}ed message:""")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, 'encrypt')


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, 'decrypt')


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
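# Worked example (illustrative): encrypting "HELLO" with key "KEY":
#   H(+K) -> R, E(+E) -> I, L(+Y) -> J, L(+K) -> V, O(+E) -> S
#   encrypt_message('KEY', 'HELLO') == 'RIJVS'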
| 20 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
lowerCAmelCase : int = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    F'''{deprecated_arg} is depreciated. Please use --no_{positive_arg} or'''
                    F''' {positive_arg}={kwargs[positive_arg]}'''
                )
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={'''help''': '''Trace the models using torchscript'''})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''})
    fp16_opt_level: str = field(
        default='''O1''',
        metadata={
            '''help''': (
                '''For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. '''
                '''See details at https://nvidia.github.io/apex/amp.html'''
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
return self.n_gpu > 0
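# Usage sketch (assumes the public transformers benchmark API that this
# dataclass extends; model name and sizes below are illustrative):
#
#   from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
#
#   args = PyTorchBenchmarkArguments(
#       models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
#   )
#   results = PyTorchBenchmark(args).run()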
| 372 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_aim_available,
    is_bf16_available,
    is_bnb_available,
    is_boto3_available,
    is_ccl_available,
    is_comet_ml_available,
    is_datasets_available,
    is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
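# --- Usage sketch (added for illustration; not part of the original file) ---
# One utility re-exported above, `find_executable_batch_size`, retries a
# function with a halved batch size whenever it raises CUDA out-of-memory.
# A minimal sketch, assuming the public accelerate API:
#
# from accelerate.utils import find_executable_batch_size
#
# @find_executable_batch_size(starting_batch_size=128)
# def train(batch_size):
#     ...  # build dataloaders with `batch_size` and run the training loop
#
# train()  # starts at 128 and halves on OOM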
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[Any] = {
"""task_specific_params""": {
"""summarization""": {"""length_penalty""": 1.0, """max_length""": 128, """min_length""": 12, """num_beams""": 4},
"""summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 142, """min_length""": 56, """num_beams""": 4},
"""summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6},
}
}
_SCREAMING_SNAKE_CASE : str = {
"""task_specific_params.summarization.length_penalty""": 1.0,
"""task_specific_params.summarization.max_length""": 128,
"""task_specific_params.summarization.min_length""": 12,
"""task_specific_params.summarization.num_beams""": 4,
"""task_specific_params.summarization_cnn.length_penalty""": 2.0,
"""task_specific_params.summarization_cnn.max_length""": 142,
"""task_specific_params.summarization_cnn.min_length""": 56,
"""task_specific_params.summarization_cnn.num_beams""": 4,
"""task_specific_params.summarization_xsum.length_penalty""": 1.0,
"""task_specific_params.summarization_xsum.max_length""": 62,
"""task_specific_params.summarization_xsum.min_length""": 11,
"""task_specific_params.summarization_xsum.num_beams""": 6,
}
self.assertEqual(flatten_dict(lowercase_ ) , lowercase_ )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Dict = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(lowercase_ ) , x.transpose() ) )
_SCREAMING_SNAKE_CASE : str = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(lowercase_ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : str = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE : List[str] = torch.tensor(lowercase_ )
self.assertTrue(np.allclose(transpose(lowercase_ ) , transpose(lowercase_ ).numpy() ) )
_SCREAMING_SNAKE_CASE : List[Any] = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE : Dict = torch.tensor(lowercase_ )
self.assertTrue(np.allclose(transpose(lowercase_ , axes=(1, 2, 0) ) , transpose(lowercase_ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[Any] = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE : int = tf.constant(lowercase_ )
self.assertTrue(np.allclose(transpose(lowercase_ ) , transpose(lowercase_ ).numpy() ) )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE : Any = tf.constant(lowercase_ )
self.assertTrue(np.allclose(transpose(lowercase_ , axes=(1, 2, 0) ) , transpose(lowercase_ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[Any] = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE : List[Any] = jnp.array(lowercase_ )
self.assertTrue(np.allclose(transpose(lowercase_ ) , np.asarray(transpose(lowercase_ ) ) ) )
_SCREAMING_SNAKE_CASE : Optional[int] = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array(lowercase_ )
self.assertTrue(np.allclose(transpose(lowercase_ , axes=(1, 2, 0) ) , np.asarray(transpose(lowercase_ , axes=(1, 2, 0) ) ) ) )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[str] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(lowercase_ , (4, 3) ) , np.reshape(lowercase_ , (4, 3) ) ) )
_SCREAMING_SNAKE_CASE : List[str] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(lowercase_ , (12, 5) ) , np.reshape(lowercase_ , (12, 5) ) ) )
@require_torch
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[Any] = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE : Dict = torch.tensor(lowercase_ )
self.assertTrue(np.allclose(reshape(lowercase_ , (4, 3) ) , reshape(lowercase_ , (4, 3) ).numpy() ) )
_SCREAMING_SNAKE_CASE : Optional[int] = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(lowercase_ )
self.assertTrue(np.allclose(reshape(lowercase_ , (12, 5) ) , reshape(lowercase_ , (12, 5) ).numpy() ) )
@require_tf
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Optional[Any] = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE : Optional[Any] = tf.constant(lowercase_ )
self.assertTrue(np.allclose(reshape(lowercase_ , (4, 3) ) , reshape(lowercase_ , (4, 3) ).numpy() ) )
_SCREAMING_SNAKE_CASE : Tuple = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE : Dict = tf.constant(lowercase_ )
self.assertTrue(np.allclose(reshape(lowercase_ , (12, 5) ) , reshape(lowercase_ , (12, 5) ).numpy() ) )
@require_flax
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Optional[int] = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE : List[Any] = jnp.array(lowercase_ )
self.assertTrue(np.allclose(reshape(lowercase_ , (4, 3) ) , np.asarray(reshape(lowercase_ , (4, 3) ) ) ) )
_SCREAMING_SNAKE_CASE : Optional[int] = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE : List[str] = jnp.array(lowercase_ )
self.assertTrue(np.allclose(reshape(lowercase_ , (12, 5) ) , np.asarray(reshape(lowercase_ , (12, 5) ) ) ) )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[Any] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(lowercase_ ) , np.squeeze(lowercase_ ) ) )
_SCREAMING_SNAKE_CASE : int = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(lowercase_ , axis=2 ) , np.squeeze(lowercase_ , axis=2 ) ) )
@require_torch
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Optional[int] = np.random.randn(1 , 3 , 4 )
_SCREAMING_SNAKE_CASE : Any = torch.tensor(lowercase_ )
self.assertTrue(np.allclose(squeeze(lowercase_ ) , squeeze(lowercase_ ).numpy() ) )
_SCREAMING_SNAKE_CASE : Any = np.random.randn(1 , 4 , 1 , 5 )
_SCREAMING_SNAKE_CASE : Dict = torch.tensor(lowercase_ )
self.assertTrue(np.allclose(squeeze(lowercase_ , axis=2 ) , squeeze(lowercase_ , axis=2 ).numpy() ) )
@require_tf
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Any = np.random.randn(1 , 3 , 4 )
_SCREAMING_SNAKE_CASE : List[Any] = tf.constant(lowercase_ )
self.assertTrue(np.allclose(squeeze(lowercase_ ) , squeeze(lowercase_ ).numpy() ) )
_SCREAMING_SNAKE_CASE : Dict = np.random.randn(1 , 4 , 1 , 5 )
_SCREAMING_SNAKE_CASE : Dict = tf.constant(lowercase_ )
self.assertTrue(np.allclose(squeeze(lowercase_ , axis=2 ) , squeeze(lowercase_ , axis=2 ).numpy() ) )
@require_flax
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Any = np.random.randn(1 , 3 , 4 )
_SCREAMING_SNAKE_CASE : Dict = jnp.array(lowercase_ )
self.assertTrue(np.allclose(squeeze(lowercase_ ) , np.asarray(squeeze(lowercase_ ) ) ) )
_SCREAMING_SNAKE_CASE : List[str] = np.random.randn(1 , 4 , 1 , 5 )
_SCREAMING_SNAKE_CASE : Optional[Any] = jnp.array(lowercase_ )
self.assertTrue(np.allclose(squeeze(lowercase_ , axis=2 ) , np.asarray(squeeze(lowercase_ , axis=2 ) ) ) )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Optional[int] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(lowercase_ , axis=1 ) , np.expand_dims(lowercase_ , axis=1 ) ) )
@require_torch
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Dict = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE : Any = torch.tensor(lowercase_ )
self.assertTrue(np.allclose(expand_dims(lowercase_ , axis=1 ) , expand_dims(lowercase_ , axis=1 ).numpy() ) )
@require_tf
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Dict = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE : int = tf.constant(lowercase_ )
self.assertTrue(np.allclose(expand_dims(lowercase_ , axis=1 ) , expand_dims(lowercase_ , axis=1 ).numpy() ) )
@require_flax
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Optional[Any] = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE : Optional[int] = jnp.array(lowercase_ )
self.assertTrue(np.allclose(expand_dims(lowercase_ , axis=1 ) , np.asarray(expand_dims(lowercase_ , axis=1 ) ) ) )
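# --- Illustration (added; not part of the original test file) ---
# The helpers exercised above dispatch on the tensor type at runtime. A minimal
# sketch of that pattern for `transpose`; `my_transpose` is a hypothetical
# name, not the transformers API.
import numpy as _np


def my_transpose(array, axes=None):
    """Framework-agnostic transpose for numpy arrays and torch tensors."""
    if isinstance(array, _np.ndarray):
        return _np.transpose(array, axes=axes)
    try:
        import torch as _torch

        if isinstance(array, _torch.Tensor):
            return array.T if axes is None else array.permute(*axes)
    except ImportError:
        pass
    raise TypeError(f"Type not supported for transpose: {type(array)}.")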
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
def __UpperCamelCase ( self) -> Optional[int]:
return 32
@property
def __UpperCamelCase ( self) -> Tuple:
return 32
@property
def __UpperCamelCase ( self) -> int:
return self.time_input_dim
@property
def __UpperCamelCase ( self) -> str:
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self) -> Optional[int]:
return 100
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
torch.manual_seed(0)
a__ =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowercase_)
@property
def __UpperCamelCase ( self) -> Tuple:
torch.manual_seed(0)
a__ ={
'num_attention_heads': 2,
'attention_head_dim': 12,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
a__ =PriorTransformer(**lowercase_)
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
a__ =nn.Parameter(torch.ones(model.clip_std.shape))
return model
@property
def __UpperCamelCase ( self) -> Any:
torch.manual_seed(0)
a__ =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
a__ =CLIPVisionModelWithProjection(lowercase_)
return model
@property
def __UpperCamelCase ( self) -> Optional[int]:
a__ =CLIPImageProcessor(
crop_size=224 , do_center_crop=lowercase_ , do_normalize=lowercase_ , do_resize=lowercase_ , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
def __UpperCamelCase ( self) -> Any:
a__ =self.dummy_prior
a__ =self.dummy_image_encoder
a__ =self.dummy_text_encoder
a__ =self.dummy_tokenizer
a__ =self.dummy_image_processor
a__ =UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=lowercase_ , clip_sample_range=10.0 , )
a__ ={
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def __UpperCamelCase ( self , lowercase_ , lowercase_=0) -> Tuple:
if str(lowercase_).startswith('mps'):
a__ =torch.manual_seed(lowercase_)
else:
a__ =torch.Generator(device=lowercase_).manual_seed(lowercase_)
a__ ={
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def __UpperCamelCase ( self) -> int:
a__ ='cpu'
a__ =self.get_dummy_components()
a__ =self.pipeline_class(**lowercase_)
a__ =pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
a__ =pipe(**self.get_dummy_inputs(lowercase_))
a__ =output.image_embeds
a__ =pipe(
**self.get_dummy_inputs(lowercase_) , return_dict=lowercase_ , )[0]
a__ =image[0, -10:]
a__ =image_from_tuple[0, -10:]
assert image.shape == (1, 32)
a__ =np.array(
[-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
def __UpperCamelCase ( self) -> List[Any]:
a__ =torch_device == 'cpu'
a__ =True
a__ =False
self._test_inference_batch_single_identical(
test_max_difference=lowercase_ , relax_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
@skip_mps
def __UpperCamelCase ( self) -> Optional[int]:
a__ =torch_device == 'cpu'
a__ =False
self._test_attention_slicing_forward_pass(
test_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
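# --- Illustration (added; not part of the original test file) ---
# The branch in get_dummy_inputs above exists because, at the time these tests
# were written, MPS did not support device-local torch.Generator objects. A
# standalone helper capturing the same pattern (hypothetical name):
import torch as _torch


def make_generator(device, seed=0):
    if str(device).startswith("mps"):
        # Fall back to seeding the global RNG on Apple Silicon.
        return _torch.manual_seed(seed)
    return _torch.Generator(device=device).manual_seed(seed)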
'''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
UpperCamelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting' , revision='onnx' , safety_checker=lowercase_ , feature_extractor=lowercase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCamelCase = 'A red cat sitting on a park bench'
UpperCamelCase = np.random.RandomState(0 )
UpperCamelCase = pipe(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , guidance_scale=7.5 , num_inference_steps=1_0 , generator=lowercase_ , output_type='np' , )
UpperCamelCase = output.images
UpperCamelCase = images[0, 2_5_5:2_5_8, 2_5_5:2_5_8, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
UpperCamelCase = np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
UpperCamelCase = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-inpainting' , subfolder='scheduler' , revision='onnx' )
UpperCamelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting' , revision='onnx' , scheduler=lowercase_ , safety_checker=lowercase_ , feature_extractor=lowercase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCamelCase = 'A red cat sitting on a park bench'
UpperCamelCase = np.random.RandomState(0 )
UpperCamelCase = pipe(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , guidance_scale=7.5 , num_inference_steps=2_0 , generator=lowercase_ , output_type='np' , )
UpperCamelCase = output.images
UpperCamelCase = images[0, 2_5_5:2_5_8, 2_5_5:2_5_8, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
UpperCamelCase = np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
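# --- Illustration (added; not part of the original test file) ---
# The provider/options fixtures above map onto a plain onnxruntime session as
# follows (the model path is hypothetical and a CUDA build of onnxruntime is
# assumed):
#
# import onnxruntime as ort
#
# options = ort.SessionOptions()
# options.enable_mem_pattern = False
# session = ort.InferenceSession(
#     "model.onnx",
#     sess_options=options,
#     providers=[("CUDAExecutionProvider", {"arena_extend_strategy": "kSameAsRequested"})],
# )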
from manim import *
class Stage2(Scene):
    def construct(self):
a__ =Rectangle(height=0.5 , width=0.5)
a__ =Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
a__ =[mem.copy() for i in range(6)]
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(lowercase_ , lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('CPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
cpu.move_to([-2.5, -0.5, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(4)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('GPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
gpu.move_to([-1, -1, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Model' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
model.move_to([3, -1.0, 0])
self.add(lowercase_)
a__ =[]
for i, rect in enumerate(lowercase_):
rect.set_stroke(lowercase_)
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
a__ =Rectangle(height=0.46 / 4 , width=0.46 / 3).set_stroke(width=0.0).set_fill(lowercase_ , opacity=0.7)
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.02 , direction=lowercase_)
cpu_target.set_x(cpu_target.get_x() + 0.1)
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0)
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0)
self.add(lowercase_)
cpu_targs.append(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Loaded Checkpoint' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4)
checkpoint.move_to([3, 0.5, 0])
a__ =Square(side_length=2.2)
key.move_to([-5, 2, 0])
a__ =MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
self.add(lowercase_ , lowercase_)
a__ =MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left())
a__ =MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0])
self.play(Write(lowercase_) , Write(lowercase_))
self.play(Write(lowercase_ , run_time=1) , Create(lowercase_ , run_time=1))
a__ =[]
a__ =[]
for i, rect in enumerate(lowercase_):
a__ =fill.copy().set_fill(lowercase_ , opacity=0.7)
target.move_to(lowercase_)
first_animations.append(GrowFromCenter(lowercase_ , run_time=1))
a__ =target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1])
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5])
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5))
self.play(*lowercase_)
self.play(*lowercase_)
self.wait()
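# --- Usage note (added; not part of the original file) ---
# Manim scenes are rendered from the command line. Assuming this file were
# saved as stage_2.py, the scene above renders with:
#
#     manim -pql stage_2.py Stage2
#
# where -p previews the output and -ql selects low quality for fast iteration.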
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
__lowerCAmelCase = logging.get_logger(__name__)
class ImageProcessor(BaseImageProcessor):
    # Generic name: the original model-specific class name is not recoverable from this file.
    model_input_names = ["pixel_values"]
def __init__(self , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = PILImageResampling.BILINEAR , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = 1 / 255 , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = None , **UpperCAmelCase , ) -> None:
super().__init__(**lowercase_ )
_snake_case = size if size is not None else {"""shortest_edge""": 256}
_snake_case = get_size_dict(lowercase_ , default_to_square=lowercase_ )
_snake_case = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
_snake_case = get_size_dict(lowercase_ , param_name="""crop_size""" )
_snake_case = do_resize
_snake_case = size
_snake_case = resample
_snake_case = do_center_crop
_snake_case = crop_size
_snake_case = do_rescale
_snake_case = rescale_factor
_snake_case = do_normalize
_snake_case = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_snake_case = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = PILImageResampling.BICUBIC , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray:
_snake_case = get_size_dict(lowercase_ , default_to_square=lowercase_ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
_snake_case = get_resize_output_image_size(lowercase_ , size=size["""shortest_edge"""] , default_to_square=lowercase_ )
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray:
_snake_case = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(lowercase_ , size=(size["""height"""], size["""width"""]) , data_format=lowercase_ , **lowercase_ )
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase ) -> np.ndarray:
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray:
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def lowercase (self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = ChannelDimension.FIRST , **UpperCAmelCase , ) -> Tuple:
_snake_case = do_resize if do_resize is not None else self.do_resize
_snake_case = size if size is not None else self.size
_snake_case = get_size_dict(lowercase_ , default_to_square=lowercase_ )
_snake_case = resample if resample is not None else self.resample
_snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop
_snake_case = crop_size if crop_size is not None else self.crop_size
_snake_case = get_size_dict(lowercase_ , param_name="""crop_size""" )
_snake_case = do_rescale if do_rescale is not None else self.do_rescale
_snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
_snake_case = do_normalize if do_normalize is not None else self.do_normalize
_snake_case = image_mean if image_mean is not None else self.image_mean
_snake_case = image_std if image_std is not None else self.image_std
_snake_case = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_snake_case = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
_snake_case = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_center_crop:
_snake_case = [self.center_crop(image=lowercase_ , size=lowercase_ ) for image in images]
if do_rescale:
_snake_case = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
if do_normalize:
_snake_case = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
_snake_case = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
_snake_case = {"""pixel_values""": images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
def lowercase (self , UpperCAmelCase , UpperCAmelCase = None ) -> str:
_snake_case = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase_ ) != len(lowercase_ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(lowercase_ ):
_snake_case = target_sizes.numpy()
_snake_case = []
for idx in range(len(lowercase_ ) ):
_snake_case = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=lowercase_ )
_snake_case = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowercase_ )
else:
_snake_case = logits.argmax(dim=1 )
_snake_case = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
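# --- Usage sketch (added for illustration; not part of the original file) ---
# How a processor like the one above is normally driven, assuming its methods
# carried their conventional BaseImageProcessor names (preprocess, resize,
# center_crop, ...), which this file's obfuscated method names do not preserve:
#
# import numpy as np
# from PIL import Image
#
# processor = ImageProcessor()
# image = Image.fromarray(np.zeros((300, 400, 3), dtype=np.uint8))
# batch = processor(images=image, return_tensors="pt")  # -> BatchFeature
# print(batch["pixel_values"].shape)  # e.g. torch.Size([1, 3, 224, 224])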
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class lowercase_ (unittest.TestCase ):
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> int:
self.assertEqual(len(a._actions) , len(b._actions))
for x, y in zip(a._actions , b._actions):
a__ ={k: v for k, v in vars(lowercase_).items() if k != 'container'}
a__ ={k: v for k, v in vars(lowercase_).items() if k != 'container'}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('choices' , lowercase_) and yy.get('choices' , lowercase_):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](lowercase_) , yy['type'](lowercase_))
del xx["type"], yy["type"]
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , required=lowercase_)
expected.add_argument('--bar' , type=lowercase_ , required=lowercase_)
expected.add_argument('--baz' , type=lowercase_ , required=lowercase_)
expected.add_argument('--flag' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
self.argparsersEqual(lowercase_ , lowercase_)
a__ =['--foo', '1', '--baz', 'quux', '--bar', '0.5']
((a__) , ) =parser.parse_args_into_dataclasses(lowercase_ , look_for_args_file=lowercase_)
self.assertFalse(example.flag)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=lowercase_)
expected.add_argument('--baz' , default='toto' , type=lowercase_ , help='help message')
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
expected.add_argument('--baz' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=lowercase_ , dest='baz')
expected.add_argument('--opt' , type=lowercase_ , default=lowercase_)
a__ =[WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_)
for dataclass_type in dataclass_types:
a__ =HfArgumentParser(lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', '--no_baz'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', '--baz'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42]) , )
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(args.foo , 'toto')
a__ =parser.parse_args_into_dataclasses([])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto)
a__ =parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
a__ =parser.parse_args_into_dataclasses(['--foo', 'titi'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi)
a__ =parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 42)
a__ =parser.parse_args_into_dataclasses(['--foo', '42'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo)
def __UpperCamelCase ( self) -> List[Any]:
        @dataclass
        class WithLiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42]) , )
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(args.foo , 'toto')
a__ =parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
a__ =parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 42)
def __UpperCamelCase ( self) -> Optional[int]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=lowercase_)
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=lowercase_)
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase_)
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(
lowercase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3]) , )
a__ =parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split())
self.assertEqual(lowercase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7]))
def __UpperCamelCase ( self) -> Dict:
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , default=lowercase_ , type=lowercase_)
expected.add_argument('--bar' , default=lowercase_ , type=lowercase_ , help='help message')
expected.add_argument('--baz' , default=lowercase_ , type=lowercase_)
expected.add_argument('--ces' , nargs='+' , default=[] , type=lowercase_)
expected.add_argument('--des' , nargs='+' , default=[] , type=lowercase_)
a__ =[OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_)
for dataclass_type in dataclass_types:
a__ =HfArgumentParser(lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , bar=lowercase_ , baz=lowercase_ , ces=[] , des=[]))
a__ =parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split())
self.assertEqual(lowercase_ , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3]))
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=lowercase_ , required=lowercase_)
expected.add_argument('--required_str' , type=lowercase_ , required=lowercase_)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=lowercase_ , )
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , required=lowercase_)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=lowercase_ , )
expected.add_argument('--opt' , type=lowercase_ , default=lowercase_)
expected.add_argument('--baz' , default='toto' , type=lowercase_ , help='help message')
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
a__ =parser.parse_dict(lowercase_)[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(lowercase_ , parser.parse_dict , lowercase_ , allow_extra_keys=lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
a__ =os.path.join(lowercase_ , 'temp_json')
os.mkdir(lowercase_)
with open(temp_local_path + '.json' , 'w+') as f:
json.dump(lowercase_ , lowercase_)
a__ =parser.parse_yaml_file(Path(temp_local_path + '.json'))[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Any:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
a__ =os.path.join(lowercase_ , 'temp_yaml')
os.mkdir(lowercase_)
with open(temp_local_path + '.yaml' , 'w+') as f:
yaml.dump(lowercase_ , lowercase_)
a__ =parser.parse_yaml_file(Path(temp_local_path + '.yaml'))[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
self.assertIsNotNone(lowercase_)
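# --- Usage sketch (added for illustration; not part of the original test file) ---
# The pattern these tests exercise, in its ordinary form: declare a dataclass,
# then let HfArgumentParser turn CLI flags into a typed instance. `RunArgs`
# and its fields are hypothetical.
#
# from dataclasses import dataclass, field
# from transformers import HfArgumentParser
#
# @dataclass
# class RunArgs:
#     model_name: str = field(metadata={"help": "Checkpoint to benchmark"})
#     lr: float = 3e-4
#
# parser = HfArgumentParser(RunArgs)
# (run_args,) = parser.parse_args_into_dataclasses()  # e.g. --model_name bert-base-uncased --lr 1e-4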
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
_lowerCAmelCase = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
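# --- Usage sketch (added for illustration; not part of the original file) ---
# This builder is what backs `load_dataset("json", ...)`. File paths are
# hypothetical.
#
# from datasets import load_dataset
#
# ds = load_dataset("json", data_files={"train": "train.jsonl"})      # one JSON object per line
# ds = load_dataset("json", data_files="dump.json", field="records")  # objects nested under a top-level field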
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase: List[Any] = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
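# --- Usage sketch (added for illustration; not part of the original file) ---
# Instantiating the config and a randomly initialised model from it; the
# hyper-parameter values are illustrative only.
#
# from transformers import AutoformerConfig, AutoformerModel
#
# config = AutoformerConfig(prediction_length=24, context_length=48, input_size=1)
# model = AutoformerModel(config)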
'''simple docstring'''
speed_chart: dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.609344,
"knot": 1.852,
}
speed_chart_inverse: dict[str, float] = {
"km/h": 1.0,
"m/s": 0.277777778,
"mph": 0.621371192,
"knot": 0.539956803,
}
def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod()
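# --- Worked examples (added for illustration) ---
# Conversion first normalises to km/h via `speed_chart`, then scales into the
# target unit via `speed_chart_inverse`:
#
# convert_speed(100, "km/h", "m/s")  # -> 27.778
# convert_speed(100, "m/s", "km/h")  # -> 360.0
# convert_speed(10, "mph", "knot")   # -> 8.69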
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowerCAmelCase: List[Any] = logging.get_logger(__name__)
class ImageProcessor(BaseImageProcessor):
    # Generic name: the original model-specific class name is not recoverable from this file.
    model_input_names = ["pixel_values"]
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = True , lowercase_ = None , lowercase_ = True , lowercase_ = 1 / 255 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None:
super().__init__(**lowercase_)
a__ =size if size is not None else {'shortest_edge': 256}
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
a__ =crop_size if crop_size is not None else {'height': 224, 'width': 224}
a__ =get_size_dict(lowercase_ , param_name='crop_size')
a__ =do_resize
a__ =size
a__ =resample
a__ =do_center_crop
a__ =crop_size
a__ =do_rescale
a__ =rescale_factor
a__ =do_normalize
a__ =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a__ =image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
a__ =get_resize_output_image_size(lowercase_ , size=size['shortest_edge'] , default_to_square=lowercase_)
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
a__ =get_size_dict(lowercase_)
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
return center_crop(lowercase_ , size=(size['height'], size['width']) , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_) -> np.ndarray:
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_)
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size')
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std) for image in images]
        images = [to_channel_dimension_format(image , data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors)
    def post_process_semantic_segmentation( self , outputs , target_sizes = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits')
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
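
# Illustrative sketch (not part of the original class): the `resize` method
# above maps the *shorter* image side to `size["shortest_edge"]` while keeping
# the aspect ratio. A self-contained version of that rule, for reference:
def _shortest_edge_output_size(height: int, width: int, shortest_edge: int):
    scale = shortest_edge / min(height, width)
    return round(height * scale), round(width * scale)


# e.g. _shortest_edge_output_size(480, 640, 256) == (256, 341)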
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
__UpperCAmelCase = object()
# For specifying empty leaf dict `{}`
__UpperCAmelCase = object()
def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + """$""") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("""mp""", None)),
        (("transformer", "wte", "embedding"), P("""mp""", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, """mp""")),
        (("attention", "out_proj", "kernel"), P("""mp""", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, """mp""")),
        (("mlp", "c_fc", "bias"), P("""mp""")),
        (("mlp", "c_proj", "kernel"), P("""mp""", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
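
# Illustrative usage (not in the original module): apply the rules above to a
# toy GPT-2-style parameter tree. Every leaf must match one of the rules, or
# `set_partitions` fails via its assertion.
if __name__ == "__main__":
    toy_params = {"transformer": {"wte": {"embedding": 0}, "ln_f": {"scale": 0, "bias": 0}}}
    toy_specs = set_partitions(toy_params)
    print(toy_specs["transformer"]["wte"]["embedding"])  # PartitionSpec('mp', None)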
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)
class _PatchedModuleObj:
    """Patched module so that it works with a mocked submodule or attribute."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith('__'):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    """Patch a submodule attribute of an object, e.g. `os.path.join` as seen from `obj`."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split('.')[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self):
        *submodules, target_attr = self.target.split('.')
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module('.'.join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module('.'.join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()['__builtins__'][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""")
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
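
# Illustrative usage (not part of the original module): patch `os.path.join`
# as seen from a stand-in module that did a plain `import os`.
if __name__ == "__main__":
    import os
    import types

    demo = types.ModuleType("demo")  # hypothetical module for the demo
    demo.os = os
    with patch_submodule(demo, "os.path.join", lambda *parts: "/patched"):
        assert demo.os.path.join("a", "b") == "/patched"
    assert demo.os.path.join("a", "b") == os.path.join("a", "b")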
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
    print("""Googling.....""")
    url = F'https://www.google.com/search?q={query}&num=100'
    res = requests.get(
        url,
        headers={"""User-Agent""": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, """html.parser""")
            .find("""div""", attrs={"""class""": """yuRUbf"""})
            .find("""a""")
            .get("""href""")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, """html.parser""")
            .find("""div""", attrs={"""class""": """kCrYT"""})
            .find("""a""")
            .get("""href""")
        )['url'][0]
    webbrowser.open(link)
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"""could not parse string as bool {string}""")
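    # Illustrative behaviour: parse_bool("True") -> True, parse_bool("False") -> False;
    # anything else (including "true" or "1") raises ValueError, keeping CLI input strict.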
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None
def build_tree() -> TreeNode:
    print('''\n********Press N to stop entering at any point of time********\n''')
    check = input('''Enter the value of the root node: ''').strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or '''n'''
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or '''n'''
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise RuntimeError("Unreachable: input loop only exits by returning the tree.")
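
# For non-interactive testing (illustrative, not in the original script), a
# small tree can also be assembled directly from TreeNode instances:
def build_sample_tree() -> TreeNode:
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    root.left.left, root.left.right = TreeNode(4), TreeNode(5)
    return root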
def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=''',''')
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=''',''')
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=''',''')
def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=''',''')
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    """Level-order traversal that prints one line per level."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=''',''')
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=''',''')
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=''',''')
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stacka, stackb = [], []
    n = node
    stacka.append(n)
    while stacka:  # to find the reversed order of post order, store it in stack2
        n = stacka.pop()
        if n.left:
            stacka.append(n.left)
        if n.right:
            stacka.append(n.right)
        stackb.append(n)
    while stackb:  # pop up from stack2 will be the post order
        print(stackb.pop().data, end=''',''')
def _lowerCamelCase ( SCREAMING_SNAKE_CASE = "" , SCREAMING_SNAKE_CASE=50 , SCREAMING_SNAKE_CASE="*" ):
'''simple docstring'''
if not s:
return "\n" + width * char
A_ ,A_ = divmod(width - len(__a ) - 2 , 2 )
return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
    node = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 50 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Format a user-agent string with basic info about a request."""
    ua = f"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"""; torch/{_torch_version}"""
    if is_flax_available():
        ua += f"""; jax/{_jax_version}"""
        ua += f"""; flax/{_flax_version}"""
    if is_onnx_available():
        ua += f"""; onnxruntime/{_onnxruntime_version}"""
    # CI will set this value to True
    if os.environ.get('DIFFUSERS_IS_CI', '').upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"""{k}/{v}""" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)['name']
        return f"""{username}/{model_id}"""
    else:
        return f"""{organization}/{model_id}"""
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            'Modelcard rendering is based on Jinja templates.'
            ' Please make sure to have `jinja` installed before using `create_model_card`.'
            ' To install it, please run `pip install Jinja2`.')
    if hasattr(args, 'local_rank') and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args, 'hub_token') else None
    repo_name = get_full_repo_name(model_name, token=hub_token)
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language='en', license='apache-2.0', library_name='diffusers', tags=[], datasets=args.dataset_name, metrics=[], ),
        template_path=MODEL_CARD_TEMPLATE_PATH, model_name=model_name, repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, 'dataset_name') else None,
        learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, 'gradient_accumulation_steps') else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, 'adam_beta1') else None,
        adam_beta2=args.adam_beta2 if hasattr(args, 'adam_beta2') else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, 'adam_weight_decay') else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, 'adam_epsilon') else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, 'lr_scheduler') else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, 'lr_warmup_steps') else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, 'ema_inv_gamma') else None,
        ema_power=args.ema_power if hasattr(args, 'ema_power') else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, 'ema_max_decay') else None,
        mixed_precision=args.mixed_precision, )
    card_path = os.path.join(args.output_dir, 'README.md')
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """Extract the commit hash from a resolved filename toward a cache file."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r'snapshots/([^/]+)/', resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
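
# Illustrative behaviour (assuming REGEX_COMMIT_HASH matches a 40-character
# hex string, as in huggingface_hub):
# extract_commit_hash("/cache/snapshots/" + "0" * 40 + "/unet/config.json") -> "0" * 40
# extract_commit_hash("/tmp/unet/config.json") -> None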
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None):
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob('**/blobs/*'):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.')
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'the directory exists and can be written to.'
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split('.')
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '.'.join(splits)
    return weights_name
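
# Illustrative behaviour (not in the original module): the variant is spliced
# in just before the file extension.
# _add_variant("diffusion_pytorch_model.bin", "fp16") -> "diffusion_pytorch_model.fp16.bin"
# _add_variant("model.safetensors") -> "model.safetensors"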
def _get_model_file(
    pretrained_model_name_or_path, *,
    weights_name, subfolder, cache_dir, force_download, proxies, resume_download, local_files_only, use_auth_token, user_agent, revision, commit_hash=None, ):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""")
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse('0.20.0')
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
                warnings.warn(
                    f"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""", FutureWarning, )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.""", FutureWarning, )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'this model name. Check the model page at '
f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
f""" directory containing a file named {weights_name} or"""
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
f"""containing a file named {weights_name}""" )
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab = ["""__start__""", """adapt""", """act""", """ap@@""", """te""", """__end__""", """__unk__"""]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["""#version: 0.2""", """a p""", """t e</w>""", """ap t</w>""", """a d""", """ad apt</w>""", """a c""", """ac t</w>""", """"""]
        self.special_tokens_map = {"""unk_token""": """__unk__""", """bos_token""": """__start__""", """eos_token""": """__end__"""}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""])
        with open(self.vocab_file, """w""", encoding="""utf-8""") as fp:
            fp.write(json.dumps(vocab_tokens) + """\n""")
        with open(self.merges_file, """w""", encoding="""utf-8""") as fp:
            fp.write("""\n""".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = """adapt act apte"""
        output_text = """adapt act apte"""
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = """adapt act apte"""
        bpe_tokens = ["""adapt""", """act""", """ap@@""", """te"""]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""")
        assert tok("""sam""").input_ids == [1384]
        src_text = """I am a small frog."""
        encoded = tok([src_text], padding=False, truncation=True)["""input_ids"""]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."
    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""")
        src_text = """I am a small frog ."""
        src_text_dot = """."""
        encoded = tok(src_text)["""input_ids"""]
        encoded_dot = tok(src_text_dot)["""input_ids"""]
        assert encoded[-1] == encoded_dot[0]
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"""Converting {name}...""")
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('levit_128s', pretrained=True)
            else:
                from_model = timm.create_model('levit_128', pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model('levit_192', pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model('levit_256', pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model('levit_384', pretrained=True)
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits
        assert torch.allclose(out1, out2), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"""Pushed {checkpoint_name}""")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
        'levit-128S': 128,
        'levit-128': 128,
        'levit-192': 192,
        'levit-256': 256,
        'levit-384': 384,
    }
    names_to_config = {
        'levit-128S': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        'levit-128': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        'levit-192': ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        'levit-256': ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        'levit-384': ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1, ),
    }
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt search: return True if `pattern` occurs in `text`."""
    # 1) Construct the failure array
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
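
# Illustrative note (not in the original file): because the failure array lets
# `j` fall back without ever moving `i` backwards, the scan runs in
# O(len(text) + len(pattern)) overall.
# e.g. kmp("abc", "xxabcxx") -> True, kmp("abd", "xxabcxx") -> False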
def get_failure_array(pattern: str) -> list[int]:
    """Failure (longest proper prefix that is also a suffix) array used by kmp."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
    # Test 1)
    pattern = 'abc1abc12'
    text1 = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    text2 = 'alskfjaldsk23adsfabcabc'
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = 'ABABX'
    text = 'ABABZABABYABABX'
    assert kmp(pattern, text)

    # Test 3)
    pattern = 'AAAB'
    text = 'ABAAAAAB'
    assert kmp(pattern, text)

    # Test 4)
    pattern = 'abcdabcy'
    text = 'abcxabcdabxabcdabcdabcy'
    assert kmp(pattern, text)

    # Test 5)
    pattern = 'aabaabaaa'
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, 'r') as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split('.')[-1]]
            weight_type = 'param'
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split('.'):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""")
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split('.'):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split('.')[-1]]
            weight_type = 'param'
    if weight_type is not None and weight_type != "param":
        full_key = '.'.join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = '.'.join([key, hf_param_name])
    else:
        full_key = key
    hf_dict[full_key] = value if 'lm_head' in full_key else value[0]
PARAM_MAPPING = {
    'W_a': 'linear_1.weight',
    'W_b': 'linear_2.weight',
    'b_a': 'linear_1.bias',
    'b_b': 'linear_2.bias',
    'ln_W': 'norm.weight',
    'ln_b': 'norm.bias',
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split('.')[-2]
                mapped_key = mapped_key.replace('*', layer_index)
            if "weight_g" in name:
                weight_type = 'weight_g'
            elif "weight_v" in name:
                weight_type = 'weight_v'
            elif "bias" in name:
                weight_type = 'bias'
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = 'weight'
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False):
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()
    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wavavec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1, sampling_rate=1_6000, padding_value=0, do_normalize=True, return_attention_mask=True, )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['<pad>'] = 0
            vocab_dict['<s>'] = 1
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='|', do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1, sampling_rate=1_6000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = WavaVecaForCTC(config)
    else:
        hf_wavavec = WavaVecaForPreTraining(config)
    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])})
    else:
        task_arg = argparse.Namespace(task='audio_pretraining')
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec, not is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
'''simple docstring'''
def is_even(number: int) -> bool:
    return number & 1 == 0
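
# Why this works (illustrative): the least-significant bit of an integer is 0
# exactly when the number is even, so a single AND replaces `number % 2 == 0`.
# This also holds for negative numbers in Python, e.g. (-2) & 1 == 0.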
if __name__ == "__main__":
import doctest
doctest.testmod()
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained('google/mt5-small', return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')
        input_ids = tokenizer('Hello there', return_tensors='pt').input_ids
        labels = tokenizer('Hi I am', return_tensors='pt').input_ids
        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """Compute the next generation for the given grid of 0/1 cells."""
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """Render `frames` successive generations of the grid as greyscale PIL images."""
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("""RGB""", (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
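
# Sanity check (illustrative, not in the original script): the blinker
# oscillates with period two, i.e.
# new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
# and applying new_generation twice returns the original BLINKER.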
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel')
        processor = BlipProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of small random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'input_ids', 'attention_mask'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'input_ids', 'attention_mask'])
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    """Build a SwinConfig matching the given timm checkpoint name."""
    config = SwinConfig()
    name_split = swin_name.split('_')
    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "in22k" in swin_name:
        num_classes = 21_841
    else:
        num_classes = 1_000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.norm')
    if "layers" in name:
        name = 'encoder.' + name
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "head" in name:
        name = name.replace('head', 'classifier')
    else:
        name = 'swin.' + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"""] = val[:dim, :]
                orig_state_dict[f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"""] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"""] = val[
                    :dim
                ]
                orig_state_dict[f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"""] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"""] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def __lowerCamelCase ( A__ , A__ ) -> Tuple:
"""simple docstring"""
UpperCamelCase = timm.create_model(__a , pretrained=__a )
timm_model.eval()
UpperCamelCase = get_swin_config(__a )
UpperCamelCase = SwinForImageClassification(__a )
model.eval()
UpperCamelCase = convert_state_dict(timm_model.state_dict() , __a )
model.load_state_dict(__a )
UpperCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
UpperCamelCase = Image.open(requests.get(__a , stream=__a ).raw )
UpperCamelCase = image_processor(images=__a , return_tensors='pt' )
UpperCamelCase = timm_model(inputs['pixel_values'] )
UpperCamelCase = model(**__a ).logits
assert torch.allclose(__a , __a , atol=1e-3 )
print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__a )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__a )
if __name__ == "__main__":
_lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swin_name",
default="swin_tiny_patch4_window7_224",
type=str,
help="Name of the Swin timm model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_lowerCamelCase : Tuple = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 430 |
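A hedged usage sketch for the conversion script above: after `convert_swin_checkpoint` saves a checkpoint, it can be reloaded through the standard `from_pretrained` API. The dump directory below is a hypothetical placeholder, not a path taken from the script.

# Hedged sketch, not part of the original script: reloading a converted
# Swin checkpoint. "./swin_dump" is a placeholder assumption.
from transformers import AutoImageProcessor, SwinForImageClassification

model = SwinForImageClassification.from_pretrained("./swin_dump")
processor = AutoImageProcessor.from_pretrained("./swin_dump")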
def exchange_sort(numbers: list[int]) -> list[int]:
    # Repeatedly exchange out-of-order pairs; O(n^2) comparisons, in place.
    number_count = len(numbers)
    for i in range(number_count):
        for j in range(i + 1, number_count):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(exchange_sort(unsorted))
| 20 | 0 |
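A few quick sanity checks for the exchange sort above (a minimal sketch; it assumes `exchange_sort` is importable from the snippet):

# Minimal sanity checks for exchange_sort; the list is sorted in place and
# the same list object is returned.
assert exchange_sort([5, 1, 4, 2]) == [1, 2, 4, 5]
assert exchange_sort([]) == []        # empty input comes back unchanged
assert exchange_sort([7]) == [7]      # a single element is already sorted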
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _lowerCAmelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ = "yolos"
def __init__(self , UpperCAmelCase=768 , UpperCAmelCase=12 , UpperCAmelCase=12 , UpperCAmelCase=3072 , UpperCAmelCase="gelu" , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-1_2 , UpperCAmelCase=[512, 864] , UpperCAmelCase=16 , UpperCAmelCase=3 , UpperCAmelCase=True , UpperCAmelCase=100 , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=1 , UpperCAmelCase=5 , UpperCAmelCase=2 , UpperCAmelCase=5 , UpperCAmelCase=2 , UpperCAmelCase=0.1 , **UpperCAmelCase , ) -> List[str]:
super().__init__(**lowercase_ )
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = qkv_bias
_snake_case = num_detection_tokens
_snake_case = use_mid_position_embeddings
_snake_case = auxiliary_loss
# Hungarian matcher
_snake_case = class_cost
_snake_case = bbox_cost
_snake_case = giou_cost
# Loss coefficients
_snake_case = bbox_loss_coefficient
_snake_case = giou_loss_coefficient
_snake_case = eos_coefficient
class _lowerCAmelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ = version.parse("1.11" )
@property
def lowercase (self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowercase (self ) -> float:
return 1e-4
@property
def lowercase (self ) -> int:
        return 12
 | 585 |
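A hedged usage sketch: the configuration class above mirrors `YolosConfig` from transformers, so the public API can be exercised directly. The parameter values below simply echo the defaults visible in the `__init__` signature.

# Hedged sketch assuming the public transformers API; values mirror the
# defaults shown in the signature above.
from transformers import YolosConfig

config = YolosConfig(num_detection_tokens=100, image_size=[512, 864])
print(config.model_type)  # -> "yolos"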
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="resnet50" , lowercase_=3 , lowercase_=32 , lowercase_=3 , lowercase_=True , lowercase_=True , ) -> Union[str, Any]:
a__ =parent
a__ =out_indices if out_indices is not None else [4]
a__ =stage_names
a__ =out_features
a__ =backbone
a__ =batch_size
a__ =image_size
a__ =num_channels
a__ =use_pretrained_backbone
a__ =is_training
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a__ =self.get_config()
return config, pixel_values
def __UpperCamelCase ( self) -> Tuple:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> str:
a__ =TimmBackbone(config=lowercase_)
model.to(lowercase_)
model.eval()
with torch.no_grad():
a__ =model(lowercase_)
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __UpperCamelCase ( self) -> str:
a__ =self.prepare_config_and_inputs()
a__ , a__ =config_and_inputs
a__ ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class lowercase_ (lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
snake_case =(TimmBackbone,) if is_torch_available() else ()
snake_case ={'feature-extraction': TimmBackbone} if is_torch_available() else {}
snake_case =False
snake_case =False
snake_case =False
snake_case =False
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =TimmBackboneModelTester(self)
a__ =ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_)
def __UpperCamelCase ( self) -> Dict:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self) -> str:
a__ ='resnet18'
a__ ='microsoft/resnet-18'
a__ =AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_)
a__ =AutoBackbone.from_pretrained(lowercase_)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names))
self.assertEqual(timm_model.channels , transformers_model.channels)
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,))
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1])
a__ =AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_ , out_indices=[1, 2, 3])
a__ =AutoBackbone.from_pretrained(lowercase_ , out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(timm_model.channels , transformers_model.channels)
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> Dict:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.')
def __UpperCamelCase ( self) -> str:
pass
@unittest.skip('Safetensors is not supported by timm.')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self) -> Optional[Any]:
pass
def __UpperCamelCase ( self) -> Any:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
a__ =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ =[*signature.parameters.keys()]
a__ =['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_)
def __UpperCamelCase ( self) -> Any:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
a__ =True
a__ =self.has_attentions
# no need to test all models as different heads yield the same functionality
a__ =self.all_model_classes[0]
a__ =model_class(lowercase_)
model.to(lowercase_)
a__ =self._prepare_for_class(lowercase_ , lowercase_)
a__ =model(**lowercase_)
a__ =outputs[0][-1]
# Encoder-/Decoder-only models
a__ =outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
a__ =outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowercase_)
self.assertIsNotNone(hidden_states.grad)
if self.has_attentions:
self.assertIsNotNone(attentions.grad)
def __UpperCamelCase ( self) -> List[str]:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
self.assertEqual(len(result.feature_maps) , len(config.out_indices))
self.assertEqual(len(model.channels) , len(config.out_indices))
# Check output of last stage is taken if out_features=None, out_indices=None
a__ =copy.deepcopy(lowercase_)
a__ =None
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
self.assertEqual(len(result.feature_maps) , 1)
self.assertEqual(len(model.channels) , 1)
# Check backbone can be initialized with fresh weights
a__ =copy.deepcopy(lowercase_)
a__ =False
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
| 20 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def UpperCamelCase ( _A , _A , _A ) -> Union[str, Any]:
lowercase : Union[str, Any] = WavaVecaForSequenceClassification.from_pretrained(__a , config=__a )
lowercase : List[Any] = downstream_dict["""projector.weight"""]
lowercase : List[str] = downstream_dict["""projector.bias"""]
lowercase : List[Any] = downstream_dict["""model.post_net.linear.weight"""]
lowercase : Union[str, Any] = downstream_dict["""model.post_net.linear.bias"""]
return model
def UpperCamelCase ( _A , _A , _A ) -> Dict:
lowercase : Tuple = WavaVecaForAudioFrameClassification.from_pretrained(__a , config=__a )
lowercase : str = downstream_dict["""model.linear.weight"""]
lowercase : str = downstream_dict["""model.linear.bias"""]
return model
def UpperCamelCase ( _A , _A , _A ) -> Optional[Any]:
lowercase : Union[str, Any] = WavaVecaForXVector.from_pretrained(__a , config=__a )
lowercase : Dict = downstream_dict["""connector.weight"""]
lowercase : Tuple = downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
lowercase : str = downstream_dict[
F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
lowercase : List[str] = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
lowercase : List[str] = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
lowercase : Optional[Any] = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
lowercase : int = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
lowercase : Tuple = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
lowercase : List[Any] = downstream_dict["""objective.W"""]
return model
@torch.no_grad()
def UpperCamelCase ( _A , _A , _A , _A ) -> Any:
lowercase : Optional[int] = torch.load(__a , map_location="""cpu""" )
lowercase : Optional[Any] = checkpoint["""Downstream"""]
lowercase : Optional[Any] = WavaVecaConfig.from_pretrained(__a )
lowercase : Dict = WavaVecaFeatureExtractor.from_pretrained(
__a , return_attention_mask=__a , do_normalize=__a )
lowercase : List[str] = hf_config.architectures[0]
if arch.endswith("""ForSequenceClassification""" ):
lowercase : str = convert_classification(__a , __a , __a )
elif arch.endswith("""ForAudioFrameClassification""" ):
lowercase : List[str] = convert_diarization(__a , __a , __a )
elif arch.endswith("""ForXVector""" ):
lowercase : List[Any] = convert_xvector(__a , __a , __a )
else:
raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
lowercase : Dict = checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(__a )
hf_model.save_pretrained(__a )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
_lowerCAmelCase = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 264 |
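A hedged sketch of how a checkpoint produced by `convert_saprl_checkpoint` above could be reloaded; "./converted_model" is a placeholder for the script's `--model_dump_path` argument, and the sequence-classification head is just one of the three architectures the script can emit.

# Hedged sketch: reloading the converted S3PRL head. The path is a
# placeholder for --model_dump_path.
from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2ForSequenceClassification

model = Wav2Vec2ForSequenceClassification.from_pretrained("./converted_model")
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("./converted_model")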
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowerCAmelCase: Optional[Any] = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase: List[str] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_lowerCAmelCase: List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 20 | 0 |
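A hedged illustration of what the `_LazyModule` pattern above buys: importing the package is cheap, and the torch-backed symbols are only materialized on first attribute access.

# Hedged sketch, assuming a transformers version that ships SwiftFormer:
# the import below does not eagerly load the heavy modeling code; the
# attribute is resolved lazily on first access.
from transformers.models.swiftformer import SwiftFormerConfig

config = SwiftFormerConfig()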
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class __UpperCamelCase :
def __init__( self , _lowerCAmelCase = None ) -> Dict:
'''simple docstring'''
lowercase = value
lowercase = None # Added in order to delete a node easier
lowercase = None
lowercase = None
def __repr__( self ) -> str:
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F"""{self.value}""": (self.left, self.right)} , indent=1 )
class __UpperCamelCase :
def __init__( self , _lowerCAmelCase = None ) -> int:
'''simple docstring'''
lowercase = root
def __str__( self ) -> str:
'''simple docstring'''
return str(self.root )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase ) -> None:
'''simple docstring'''
if new_children is not None: # reset its kids
lowercase = node.parent
if node.parent is not None: # reset its parent
if self.is_right(lowercase_ ): # If it is the right children
lowercase = new_children
else:
lowercase = new_children
else:
lowercase = new_children
def _a ( self , _lowerCAmelCase ) -> bool:
'''simple docstring'''
if node.parent and node.parent.right:
return node == node.parent.right
return False
def _a ( self ) -> bool:
'''simple docstring'''
return self.root is None
def _a ( self , _lowerCAmelCase ) -> None:
'''simple docstring'''
lowercase = Node(lowercase_ ) # create a new Node
if self.empty(): # if Tree is empty
lowercase = new_node # set its root
else: # Tree is not empty
lowercase = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
lowercase = new_node # We insert the new node in a leaf
break
else:
lowercase = parent_node.left
else:
if parent_node.right is None:
lowercase = new_node
break
else:
lowercase = parent_node.right
lowercase = parent_node
def _a ( self , *_lowerCAmelCase ) -> None:
'''simple docstring'''
for value in values:
self.__insert(lowercase_ )
def _a ( self , _lowerCAmelCase ) -> Node | None:
'''simple docstring'''
if self.empty():
raise IndexError("""Warning: Tree is empty! please use another.""" )
else:
lowercase = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
lowercase = node.left if value < node.value else node.right
return node
def _a ( self , _lowerCAmelCase = None ) -> Node | None:
'''simple docstring'''
if node is None:
if self.root is None:
return None
lowercase = self.root
if not self.empty():
while node.right is not None:
lowercase = node.right
return node
def _a ( self , _lowerCAmelCase = None ) -> Node | None:
'''simple docstring'''
if node is None:
lowercase = self.root
if self.root is None:
return None
if not self.empty():
lowercase = self.root
while node.left is not None:
lowercase = node.left
return node
def _a ( self , _lowerCAmelCase ) -> None:
'''simple docstring'''
lowercase = self.search(lowercase_ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(lowercase_ , lowercase_ )
elif node.left is None: # Has only right children
self.__reassign_nodes(lowercase_ , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(lowercase_ , node.left )
else:
lowercase = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
lowercase = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def _a ( self , _lowerCAmelCase ) -> Iterable:
'''simple docstring'''
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def _a ( self , _lowerCAmelCase=None ) -> Any:
'''simple docstring'''
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase ) -> None:
'''simple docstring'''
if node:
self.inorder(lowercase_ , node.left )
arr.append(node.value )
self.inorder(lowercase_ , node.right )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase ) -> int:
'''simple docstring'''
lowercase = []
self.inorder(lowercase_ , lowercase_ ) # append all values to list using inorder traversal
return arr[k - 1]
def SCREAMING_SNAKE_CASE ( lowercase_ : Node | None ):
lowercase = []
if curr_node is not None:
lowercase = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def SCREAMING_SNAKE_CASE ( ):
lowercase = (8, 3, 6, 1, 10, 14, 13, 4, 7)
lowercase = BinarySearchTree()
for i in testlist:
t.insert(__a )
# Prints all the elements of the list in order traversal
print(__a )
if t.search(6 ) is not None:
print("""The value 6 exists""" )
else:
print("""The value 6 doesn\'t exist""" )
if t.search(-1 ) is not None:
print("""The value -1 exists""" )
else:
print("""The value -1 doesn\'t exist""" )
if not t.empty():
print("""Max Value: """ , t.get_max().value ) # type: ignore
print("""Min Value: """ , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(__a )
print(__a )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 588 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase: str = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class lowercase_ (lowercase__ ):
snake_case ='big_bird'
def __init__( self , lowercase_=50358 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu_new" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=4096 , lowercase_=2 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=True , lowercase_=0 , lowercase_=1 , lowercase_=2 , lowercase_=66 , lowercase_="block_sparse" , lowercase_=True , lowercase_=False , lowercase_=64 , lowercase_=3 , lowercase_=None , **lowercase_ , ) -> Any:
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , sep_token_id=lowercase_ , **lowercase_ , )
a__ =vocab_size
a__ =max_position_embeddings
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =intermediate_size
a__ =hidden_act
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =initializer_range
a__ =type_vocab_size
a__ =layer_norm_eps
a__ =use_cache
a__ =rescale_embeddings
a__ =attention_type
a__ =use_bias
a__ =block_size
a__ =num_random_blocks
a__ =classifier_dropout
class lowercase_ (lowercase__ ):
@property
def __UpperCamelCase ( self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a__ ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
a__ ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
| 20 | 0 |
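A hedged usage sketch mirroring the `BigBirdConfig` public API shown above: block-sparse attention is selected through `attention_type`, with `block_size` and `num_random_blocks` controlling the sparsity pattern.

# Hedged sketch using the public transformers API that the class above mirrors.
from transformers import BigBirdConfig

config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)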
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
__UpperCAmelCase = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
__UpperCAmelCase = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = (images / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase__ : Optional[int] = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCAmelCase__ : Tuple = numpy_to_pil(__a )
return images
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
if images.ndim == 3:
UpperCAmelCase__ : Tuple = images[None, ...]
UpperCAmelCase__ : Optional[Any] = (images * 255).round().astype("""uint8""" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
UpperCAmelCase__ : Optional[int] = [Image.fromarray(image.squeeze() , mode="""L""" ) for image in images]
else:
UpperCAmelCase__ : Tuple = [Image.fromarray(__a ) for image in images]
return pil_images
| 65 |
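A minimal usage sketch for `numpy_to_pil` above: a float batch in [0, 1] with NHWC layout becomes a list of PIL images.

# Minimal sketch for numpy_to_pil; the random batch is illustrative only.
import numpy as np

batch = np.random.rand(2, 64, 64, 3).astype("float32")  # NHWC, values in [0, 1]
pil_images = numpy_to_pil(batch)                         # -> list of 2 PIL.Image.Image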
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger(__name__)
_lowerCAmelCase: Tuple = torch.device('cpu')
def _lowercase( ):
a__ ='http://images.cocodataset.org/val2017/000000039769.jpg'
a__ =Image.open(requests.get(__a , stream=__a ).raw )
return im
def _lowercase( __a : Optional[Any] ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def _lowercase( __a : int , __a : int , __a : Optional[Any] ):
a__ =dct.pop(__a )
a__ =val
def _lowercase( __a : Optional[Any] ):
a__ =[]
for k in state_dict.keys():
a__ =k
if ".pwconv" in k:
a__ =k_new.replace('.pwconv' , '.point_wise_conv' )
if ".dwconv" in k:
a__ =k_new.replace('.dwconv' , '.depth_wise_conv' )
if ".Proj." in k:
a__ =k_new.replace('.Proj.' , '.proj.' )
if "patch_embed" in k_new:
a__ =k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
if "network" in k_new:
a__ =k_new.split('.' )
if ls[2].isdigit():
a__ ='swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
else:
a__ =k_new.replace('network' , 'swiftformer.encoder.network' )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def _lowercase( __a : Union[str, Any] , __a : int , __a : str ):
a__ =SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
a__ =1000
a__ ='huggingface/label-files'
a__ ='imagenet-1k-id2label.json'
a__ =json.load(open(hf_hub_download(__a , __a , repo_type='dataset' ) , 'r' ) )
a__ ={int(__a ): v for k, v in idalabel.items()}
a__ =idalabel
a__ ={v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
a__ =[3, 3, 6, 4]
a__ =[48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
a__ =[3, 3, 9, 6]
a__ =[48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
a__ =[4, 3, 10, 5]
a__ =[48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
a__ =[4, 4, 12, 6]
a__ =[64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('https' ):
a__ =torch.hub.load_state_dict_from_url(__a , map_location='cpu' , check_hash=__a )
else:
a__ =torch.load(__a , map_location='cpu' )
a__ =checkpoint
a__ =create_rename_keys(__a )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__a , __a , __a )
# load HuggingFace model
a__ =SwiftFormerForImageClassification(__a ).eval()
hf_model.load_state_dict(__a )
# prepare test inputs
a__ =prepare_img()
a__ =ViTImageProcessor.from_pretrained('preprocessor_config' )
a__ =processor(images=__a , return_tensors='pt' )
# compare outputs from both models
a__ =get_expected_output(__a )
a__ =hf_model(inputs['pixel_values'] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , __a , atol=1e-3 )
Path(__a ).mkdir(exist_ok=__a )
print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(__a )
if __name__ == "__main__":
_lowerCAmelCase: Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
_lowerCAmelCase: Optional[int] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 20 | 0 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 672 |
from __future__ import annotations
from typing import Any
class lowercase_ :
def __init__( self , lowercase_) -> None:
a__ =num_of_nodes
a__ =[]
a__ ={}
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_) -> None:
self.m_edges.append([u_node, v_node, weight])
def __UpperCamelCase ( self , lowercase_) -> int:
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node])
def __UpperCamelCase ( self , lowercase_) -> None:
if self.m_component[u_node] != u_node:
for k in self.m_component:
a__ =self.find_component(lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_) -> None:
if component_size[u_node] <= component_size[v_node]:
a__ =v_node
component_size[v_node] += component_size[u_node]
self.set_component(lowercase_)
elif component_size[u_node] >= component_size[v_node]:
a__ =self.find_component(lowercase_)
component_size[u_node] += component_size[v_node]
self.set_component(lowercase_)
def __UpperCamelCase ( self) -> None:
a__ =[]
a__ =0
a__ =[-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes):
self.m_component.update({node: node})
component_size.append(1)
a__ =self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
a__ , a__ , a__ =edge
a__ =self.m_component[u]
a__ =self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
a__ =[u, v, w]
for edge in minimum_weight_edge:
if isinstance(lowercase_ , lowercase_):
a__ , a__ , a__ =edge
a__ =self.m_component[u]
a__ =self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(lowercase_ , lowercase_ , lowercase_)
print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
num_of_components -= 1
a__ =[-1] * self.m_num_of_nodes
print(F"""The total weight of the minimal spanning tree is: {mst_weight}""")
def _lowercase( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 | 0 |
def binary_insertion_sort(collection: list) -> list:
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary-search the insertion point within the already-sorted prefix.
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift elements right to open the slot, then insert.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(binary_insertion_sort(unsorted))
| 203 |
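Quick sanity checks for the binary insertion sort above (a sketch; it assumes `binary_insertion_sort` is importable from the snippet):

# Minimal checks; duplicates and negatives are handled.
assert binary_insertion_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
assert binary_insertion_sort([]) == []
assert binary_insertion_sort([-2, -5, -45]) == [-45, -5, -2]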
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowerCAmelCase: Union[str, Any] = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
_lowerCAmelCase: Dict = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_lowerCAmelCase: List[Any] = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ (datasets.Metric ):
def __UpperCamelCase ( self) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_=True , lowercase_=False) -> Any:
if rouge_types is None:
a__ =['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
a__ =rouge_scorer.RougeScorer(rouge_types=lowercase_ , use_stemmer=lowercase_)
if use_aggregator:
a__ =scoring.BootstrapAggregator()
else:
a__ =[]
for ref, pred in zip(lowercase_ , lowercase_):
a__ =scorer.score(lowercase_ , lowercase_)
if use_aggregator:
aggregator.add_scores(lowercase_)
else:
scores.append(lowercase_)
if use_aggregator:
a__ =aggregator.aggregate()
else:
a__ ={}
for key in scores[0]:
a__ =[score[key] for score in scores]
return result
| 20 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __snake_case( lowercase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = KandinskyVaaPriorPipeline
UpperCAmelCase : Optional[Any] = ["prompt"]
UpperCAmelCase : str = ["prompt", "negative_prompt"]
UpperCAmelCase : List[Any] = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
UpperCAmelCase : Tuple = False
@property
def __snake_case ( self ) -> Optional[int]:
return 32
@property
def __snake_case ( self ) -> Tuple:
return 32
@property
def __snake_case ( self ) -> int:
return self.time_input_dim
@property
def __snake_case ( self ) -> str:
return self.time_input_dim * 4
@property
def __snake_case ( self ) -> Optional[int]:
return 100
@property
def __snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def __snake_case ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowercase_ )
@property
def __snake_case ( self ) -> Tuple:
torch.manual_seed(0 )
lowerCAmelCase = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 12,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
lowerCAmelCase = PriorTransformer(**lowercase_ )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
lowerCAmelCase = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def __snake_case ( self ) -> Any:
torch.manual_seed(0 )
lowerCAmelCase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
lowerCAmelCase = CLIPVisionModelWithProjection(lowercase_ )
return model
@property
def __snake_case ( self ) -> Optional[int]:
lowerCAmelCase = CLIPImageProcessor(
crop_size=224 , do_center_crop=lowercase_ , do_normalize=lowercase_ , do_resize=lowercase_ , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=224 , )
return image_processor
def __snake_case ( self ) -> Any:
lowerCAmelCase = self.dummy_prior
lowerCAmelCase = self.dummy_image_encoder
lowerCAmelCase = self.dummy_text_encoder
lowerCAmelCase = self.dummy_tokenizer
lowerCAmelCase = self.dummy_image_processor
lowerCAmelCase = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=lowercase_ , clip_sample_range=1_0.0 , )
lowerCAmelCase = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
def __snake_case ( self , A_ , A_=0 ) -> Tuple:
if str(lowercase_ ).startswith("""mps""" ):
lowerCAmelCase = torch.manual_seed(lowercase_ )
else:
lowerCAmelCase = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowerCAmelCase = {
"""prompt""": """horse""",
"""generator""": generator,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def __snake_case ( self ) -> int:
lowerCAmelCase = """cpu"""
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = self.pipeline_class(**lowercase_ )
lowerCAmelCase = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase = pipe(**self.get_dummy_inputs(lowercase_ ) )
lowerCAmelCase = output.image_embeds
lowerCAmelCase = pipe(
**self.get_dummy_inputs(lowercase_ ) , return_dict=lowercase_ , )[0]
lowerCAmelCase = image[0, -10:]
lowerCAmelCase = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
lowerCAmelCase = np.array(
[-0.0_5_3_2, 1.7_1_2_0, 0.3_6_5_6, -1.0_8_5_2, -0.8_9_4_6, -1.1_7_5_6, 0.4_3_4_8, 0.2_4_8_2, 0.5_1_4_6, -0.1_1_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def __snake_case ( self ) -> List[Any]:
lowerCAmelCase = torch_device == """cpu"""
lowerCAmelCase = True
lowerCAmelCase = False
self._test_inference_batch_single_identical(
test_max_difference=lowercase_ , relax_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
@skip_mps
def __snake_case ( self ) -> Optional[int]:
lowerCAmelCase = torch_device == """cpu"""
lowerCAmelCase = False
self._test_attention_slicing_forward_pass(
            test_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
 | 433 |
from __future__ import annotations
END = '#'


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True  # mark the end of a complete word

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [' '] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie('de'))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 20 | 0 |
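A small hedged check for the autocomplete above: every completion returned for a prefix starts with that prefix, and complete words carry the trailing-space marker produced by `_elements`.

# Hedged check for the trie autocomplete defined above.
matches = autocomplete_using_trie("de")
assert all(m.startswith("de") for m in matches)
assert "depart " in matches  # complete words end with the ' ' marker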
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowercase : Any = logging.get_logger(__name__)
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Dict:
lowercase : int = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
lowercase : str = 128
elif "12-12" in model_name:
lowercase : str = 12
lowercase : Dict = 12
elif "14-14" in model_name:
lowercase : Any = 14
lowercase : Any = 14
elif "16-16" in model_name:
lowercase : Optional[Any] = 16
lowercase : List[str] = 16
else:
raise ValueError("""Model not supported""" )
lowercase : Optional[Any] = """huggingface/label-files"""
if "speech-commands" in model_name:
lowercase : Optional[Any] = 35
lowercase : List[Any] = """speech-commands-v2-id2label.json"""
else:
lowercase : Any = 527
lowercase : Optional[int] = """audioset-id2label.json"""
lowercase : Dict = json.load(open(hf_hub_download(__a , __a , repo_type="""dataset""" ) , """r""" ) )
lowercase : List[str] = {int(__a ): v for k, v in idalabel.items()}
lowercase : Union[str, Any] = idalabel
lowercase : Optional[Any] = {v: k for k, v in idalabel.items()}
return config
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Dict:
if "module.v" in name:
lowercase : List[Any] = name.replace("""module.v""" , """audio_spectrogram_transformer""" )
if "cls_token" in name:
lowercase : Dict = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "dist_token" in name:
lowercase : Union[str, Any] = name.replace("""dist_token""" , """embeddings.distillation_token""" )
if "pos_embed" in name:
lowercase : Any = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
lowercase : List[Any] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
# transformer blocks
if "blocks" in name:
lowercase : Optional[Any] = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
lowercase : Tuple = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
lowercase : Optional[Any] = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowercase : Optional[Any] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowercase : int = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowercase : List[str] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowercase : Union[str, Any] = name.replace("""mlp.fc2""" , """output.dense""" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
lowercase : List[str] = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" )
# classifier head
if "module.mlp_head.0" in name:
lowercase : str = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" )
if "module.mlp_head.1" in name:
lowercase : Any = name.replace("""module.mlp_head.1""" , """classifier.dense""" )
return name
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Dict:
for key in orig_state_dict.copy().keys():
lowercase : Any = orig_state_dict.pop(__a )
if "qkv" in key:
lowercase : Dict = key.split(""".""" )
lowercase : int = int(key_split[3] )
lowercase : List[Any] = config.hidden_size
if "weight" in key:
lowercase : Tuple = val[:dim, :]
lowercase : Any = val[dim : dim * 2, :]
lowercase : Union[str, Any] = val[-dim:, :]
else:
lowercase : List[Any] = val[:dim]
lowercase : Tuple = val[dim : dim * 2]
lowercase : int = val[-dim:]
else:
lowercase : Optional[int] = val
return orig_state_dict
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Dict:
lowercase : Optional[Any] = [
"""module.v.head.weight""",
"""module.v.head.bias""",
"""module.v.head_dist.weight""",
"""module.v.head_dist.bias""",
]
for k in ignore_keys:
state_dict.pop(__a , __a )
@torch.no_grad()
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ) -> Optional[Any]:
lowercase : Dict = get_audio_spectrogram_transformer_config(__a )
lowercase : Optional[Any] = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
# load original state_dict
lowercase : Tuple = model_name_to_url[model_name]
lowercase : Union[str, Any] = torch.hub.load_state_dict_from_url(__a , map_location="""cpu""" )
# remove some keys
remove_keys(__a )
# rename some keys
lowercase : Any = convert_state_dict(__a , __a )
# load 🤗 model
lowercase : List[Any] = ASTForAudioClassification(__a )
model.eval()
model.load_state_dict(__a )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
lowercase : Union[str, Any] = -4.2677393 if """speech-commands""" not in model_name else -6.845978
lowercase : Optional[int] = 4.5689974 if """speech-commands""" not in model_name else 5.5654526
lowercase : Optional[int] = 1_024 if """speech-commands""" not in model_name else 128
lowercase : Any = ASTFeatureExtractor(mean=__a , std=__a , max_length=__a )
if "speech-commands" in model_name:
lowercase : Dict = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" )
lowercase : Union[str, Any] = dataset[0]["""audio"""]["""array"""]
else:
lowercase : Any = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , )
lowercase , lowercase : Optional[Any] = torchaudio.load(__a )
lowercase : Any = waveform.squeeze().numpy()
lowercase : Optional[int] = feature_extractor(__a , sampling_rate=16_000 , return_tensors="""pt""" )
# forward pass
lowercase : Tuple = model(**__a )
lowercase : Tuple = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
lowercase : Any = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
lowercase : List[Any] = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
lowercase : Optional[Any] = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
lowercase : Union[str, Any] = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
lowercase : str = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
lowercase : Optional[int] = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
lowercase : Optional[Any] = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
lowercase : List[str] = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError("""Unknown model name""" )
if not torch.allclose(logits[0, :3] , __a , atol=1e-4 ):
raise ValueError("""Logits don\'t match""" )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(__a ).mkdir(exist_ok=__a )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__a )
print(f"Saving feature extractor to {pytorch_dump_folder_path}" )
feature_extractor.save_pretrained(__a )
if push_to_hub:
print("""Pushing model and feature extractor to the hub...""" )
model.push_to_hub(f"MIT/{model_name}" )
feature_extractor.push_to_hub(f"MIT/{model_name}" )
if __name__ == "__main__":
lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowercase : Union[str, Any] = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 336 |
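A hedged sketch of consuming a converted AST checkpoint via the pipeline API; the model id matches what the script's `--push_to_hub` path would publish for the default model name, and the call assumes network access to the Hub.

# Hedged sketch; assumes the checkpoint was pushed under MIT/<model_name>
# as in the script above.
from transformers import pipeline

classifier = pipeline("audio-classification", model="MIT/ast-finetuned-audioset-10-10-0.4593")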
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'


def main() -> None:
    message = input('Enter message: ')
    key = input('Enter key [alphanumeric]: ')
    mode = input('Encrypt/Decrypt [e/d]: ')
    if mode.lower().startswith('e'):
        mode = 'encrypt'
        translated = encrypt_message(key, message)
    elif mode.lower().startswith('d'):
        mode = 'decrypt'
        translated = decrypt_message(key, message)
    print(f"""\n{mode.title()}ed message:""")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, 'encrypt')


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, 'decrypt')


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == 'encrypt':
                num += LETTERS.find(key[key_index])
            elif mode == 'decrypt':
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)
            # Preserve the case of the original symbol.
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            # Non-letters pass through untouched.
            translated.append(symbol)
    return ''.join(translated)


if __name__ == "__main__":
    main()
| 20 | 0 |
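A quick round-trip check for the Vigenère helpers above: decrypting an encrypted message with the same key restores the original, since non-letters pass through untouched and the key index advances identically in both directions.

# Round-trip sanity check for the cipher above.
secret = encrypt_message("LEMON", "Attack at dawn")
assert decrypt_message("LEMON", secret) == "Attack at dawn"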
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
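
# Usage sketch for one of the helpers re-exported above (illustrative only, not
# part of this module): `find_executable_batch_size` retries a training function
# with a smaller batch size whenever it hits an out-of-memory error.
#
#     from accelerate.utils import find_executable_batch_size
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         ...  # on CUDA OOM, the wrapper halves batch_size and calls train again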
'''simple docstring'''
def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n (Project Euler problem 10), using a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1

    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
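
# Quick sanity check (illustrative, not in the original script):
# solution(10) == 17, since the primes below 10 are 2 + 3 + 5 + 7.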
if __name__ == "__main__":
print(F"{solution() = }")
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaPriorPipeline
    params = ['prompt']
    batch_params = ['prompt', 'negative_prompt']
    required_optional_params = [
        'num_images_per_prompt',
        'generator',
        'num_inference_steps',
        'latents',
        'negative_prompt',
        'guidance_scale',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            'num_attention_heads': 2,
            'attention_head_dim': 12,
            'embedding_dim': self.text_embedder_hidden_size,
            'num_layers': 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents
        # would always return 0 - set clip_std to 1 so it won't
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type='fixed_small_log',
            prediction_type='sample',
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            'prior': prior,
            'image_encoder': image_encoder,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'scheduler': scheduler,
            'image_processor': image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'horse',
            'generator': generator,
            'guidance_scale': 4.0,
            'num_inference_steps': 2,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_prior(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == 'cpu'
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
_lowerCamelCase : List[Any] = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {'height': 20, 'width': 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {'height': 16, 'width': 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class PixaStructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , 'do_normalize' ) )
self.assertTrue(hasattr(lowercase_ , 'do_convert_rgb' ) )
    def test_expected_patches(self):
"""simple docstring"""
UpperCamelCase = self.image_processor_tester.prepare_dummy_image()
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
UpperCamelCase = 2_0_4_8
UpperCamelCase = image_processor(lowercase_ , return_tensors='pt' , max_patches=lowercase_ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0_6_0_6 ) , atol=1E-3 , rtol=1E-3 ) )
    def test_call_pil(self):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
UpperCamelCase = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase = image_processor(
lowercase_ , return_tensors='pt' , max_patches=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_vqa(self):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
UpperCamelCase = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
UpperCamelCase = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(lowercase_ ):
UpperCamelCase = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=lowercase_ ).flattened_patches
UpperCamelCase = 'Hello'
UpperCamelCase = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=lowercase_ , header_text=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase = image_processor(
lowercase_ , return_tensors='pt' , max_patches=lowercase_ , header_text=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_numpy(self):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
UpperCamelCase = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase = image_processor(
lowercase_ , return_tensors='pt' , max_patches=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_pytorch(self):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
UpperCamelCase = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase = image_processor(
lowercase_ , return_tensors='pt' , max_patches=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class PixaStructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , 'do_normalize' ) )
self.assertTrue(hasattr(lowercase_ , 'do_convert_rgb' ) )
    def test_call_pil_four_channels(self):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
UpperCamelCase = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase = image_processor(
lowercase_ , return_tensors='pt' , max_patches=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
from manim import *
class Stage2(Scene):  # scene name reconstructed from context; `Scene` is the manim base class
    def construct(self):
a__ =Rectangle(height=0.5 , width=0.5)
a__ =Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
a__ =[mem.copy() for i in range(6)]
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(lowercase_ , lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('CPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
cpu.move_to([-2.5, -0.5, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(4)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('GPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
gpu.move_to([-1, -1, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Model' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
model.move_to([3, -1.0, 0])
self.add(lowercase_)
a__ =[]
for i, rect in enumerate(lowercase_):
rect.set_stroke(lowercase_)
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
a__ =Rectangle(height=0.46 / 4 , width=0.46 / 3).set_stroke(width=0.0).set_fill(lowercase_ , opacity=0.7)
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.02 , direction=lowercase_)
cpu_target.set_x(cpu_target.get_x() + 0.1)
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0)
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0)
self.add(lowercase_)
cpu_targs.append(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Loaded Checkpoint' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4)
checkpoint.move_to([3, 0.5, 0])
a__ =Square(side_length=2.2)
key.move_to([-5, 2, 0])
a__ =MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
self.add(lowercase_ , lowercase_)
a__ =MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left())
a__ =MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0])
self.play(Write(lowercase_) , Write(lowercase_))
self.play(Write(lowercase_ , run_time=1) , Create(lowercase_ , run_time=1))
a__ =[]
a__ =[]
for i, rect in enumerate(lowercase_):
a__ =fill.copy().set_fill(lowercase_ , opacity=0.7)
target.move_to(lowercase_)
first_animations.append(GrowFromCenter(lowercase_ , run_time=1))
a__ =target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1])
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5])
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5))
self.play(*lowercase_)
self.play(*lowercase_)
self.wait()
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Build a map from TensorFlow variable names to PyTorch tensors."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints into a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features, conv_layer):
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
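
# Worked example (illustrative, not in the original file): with a 7x7 input, stride 2
# and kernel 3, 7 % 2 != 0, so pad_along = max(3 - 1, 0) = 2 on each axis, split as
# (left, right, top, bottom) = (1, 1, 1, 1) - exactly what TensorFlow "SAME" padding
# would produce for that layer.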
class MobileNetVaConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetVaConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
UpperCAmelCase_ : Optional[Any] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
UpperCAmelCase_ : int = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )

            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class TFGPTaTokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPTaTokenizer, *args, **kwargs):
        """Creates an in-graph tokenizer from an existing `GPTaTokenizer`."""
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        """Creates an in-graph tokenizer from a pretrained GPT-2 tokenizer."""
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        """Creates an in-graph tokenizer from a configuration dictionary."""
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_roberta_prelayernorm": [
        "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "RobertaPreLayerNormConfig",
        "RobertaPreLayerNormOnnxConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
        "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaPreLayerNormForCausalLM",
        "RobertaPreLayerNormForMaskedLM",
        "RobertaPreLayerNormForMultipleChoice",
        "RobertaPreLayerNormForQuestionAnswering",
        "RobertaPreLayerNormForSequenceClassification",
        "RobertaPreLayerNormForTokenClassification",
        "RobertaPreLayerNormModel",
        "RobertaPreLayerNormPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
        "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaPreLayerNormForCausalLM",
        "TFRobertaPreLayerNormForMaskedLM",
        "TFRobertaPreLayerNormForMultipleChoice",
        "TFRobertaPreLayerNormForQuestionAnswering",
        "TFRobertaPreLayerNormForSequenceClassification",
        "TFRobertaPreLayerNormForTokenClassification",
        "TFRobertaPreLayerNormMainLayer",
        "TFRobertaPreLayerNormModel",
        "TFRobertaPreLayerNormPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
        "FlaxRobertaPreLayerNormForCausalLM",
        "FlaxRobertaPreLayerNormForMaskedLM",
        "FlaxRobertaPreLayerNormForMultipleChoice",
        "FlaxRobertaPreLayerNormForQuestionAnswering",
        "FlaxRobertaPreLayerNormForSequenceClassification",
        "FlaxRobertaPreLayerNormForTokenClassification",
        "FlaxRobertaPreLayerNormModel",
        "FlaxRobertaPreLayerNormPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    # Clip the range of possible GELU outputs to [-10, 10]; useful for quantization.
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    # Gated Linear Unit: split the input in two halves and gate one with the other.
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
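
# Usage sketch (illustrative): get_tf_activation("gelu") returns the GELU op
# selected above (native tf.keras GELU on TF >= 2.4, the erf-based fallback
# otherwise), while an unknown string raises a KeyError listing the valid names.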
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
UpperCAmelCase_ : Union[str, Any] = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, "w") as fp:
fp.write("\n".join(merges))
    tokenizer = FSMTTokenizer(
langs=["en", "ru"],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=["ru", "en"],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum sum of any (possibly non-contiguous) subsequence of nums."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # Either keep the running best, extend it with num, or restart at num.
        ans = max(ans, ans + num, num)

    return ans
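
# Example (illustrative): max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 12,
# the sum of all positive elements, since the subsequence need not be contiguous.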
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """Calculate the apparent power in a single-phase AC circuit from magnitudes and phase angles (in degrees)."""
    voltage_angle_rad = math.radians(voltage_angle)
    current_angle_rad = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle_rad)
    current_rect = cmath.rect(current, current_angle_rad)

    # Calculate apparent power
    return voltage_rect * current_rect
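
# Example (illustrative): apparent_power(100, 5, 0, 0) == (500+0j), i.e. a purely
# real 500 VA when voltage and current are in phase.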
if __name__ == "__main__":
import doctest
doctest.testmod()
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """Feature for translations with fixed languages per example."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self):
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """Feature for translations with variable languages per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self):
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
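
# Encoding sketch (illustrative values): a multi-translation dict is flattened into
# parallel, language-sorted tuples.
#
#     feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
#     feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"})
#     # -> {"language": ("de", "en", "fr", "fr"),
#     #     "translation": ("die katze", "the cat", "la chatte", "le chat")}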
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class EncoderDecoderModelTests(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = SeqaSeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = SeqaSeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
| 21 |
from sklearn.metrics import matthews_corrcoef
import datasets
UpperCAmelCase_ : Dict = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
UpperCAmelCase_ : Any = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
UpperCAmelCase_ : Dict = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
| 21 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
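# Usage sketch (illustrative, not part of the configuration file). `ConvNextConfig`
# and `UperNetForSemanticSegmentation` are assumed to be the usual transformers classes:
#   from transformers import ConvNextConfig, UperNetForSemanticSegmentation
#   backbone_config = ConvNextConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
#   config = UperNetConfig(backbone_config=backbone_config, num_labels=150)
#   model = UperNetForSemanticSegmentation(config)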
| 21 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, emb_loss, info = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
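# Usage sketch (illustrative; the checkpoint/config paths and the expected input
# range are assumptions, not guarantees of this script):
#   model = load_vqgan("cuda", conf_path="./model_checkpoints/vqgan_only.yaml",
#                      ckpt_path="./model_checkpoints/vqgan_only.pt")
#   xrec = reconstruct_with_vqgan(images, model)  # images: (B, 3, H, W) tensor in [-1, 1]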
| 21 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 21 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
| 21 | 1 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()

    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
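# Launch sketch (assumed invocation; the flag values are illustrative). torchrun sets
# the RANK and WORLD_SIZE environment variables that main() reads:
#   torchrun --nproc_per_node=2 this_script.py --streaming True --num_workers 2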
| 21 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence of 0s, 1s and 2s in place in a single pass."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
| 21 | 1 |
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Return shortest distances from `src` to every vertex, or raise if a negative cycle exists."""
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    # Relax every edge |V| - 1 times.
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, source)
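# Programmatic usage sketch (assumed edge list; bypasses the interactive prompts):
#   edges = [
#       {"src": 0, "dst": 1, "weight": 4},
#       {"src": 0, "dst": 2, "weight": 1},
#       {"src": 2, "dst": 1, "weight": 2},
#   ]
#   bellman_ford(edges, vertex_count=3, edge_count=3, src=0)  # -> [0.0, 3.0, 1.0]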
| 21 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """Variance-preserving SDE scheduler, following Song et al.'s score-SDE formulation."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
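# Sampling-loop sketch (illustrative; `score_model` is an assumed score network and
# `x` an initial noise sample, neither of which is defined in this file):
#   scheduler = ScoreSdeVpScheduler()
#   scheduler.set_timesteps(num_inference_steps=1000)
#   for t in scheduler.timesteps:
#       score = score_model(x, t)
#       x, x_mean = scheduler.step_pred(score, x, t)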
| 21 | 1 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020


@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
    tgt_text = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
| 21 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    """General feature extraction class for speech-recognition inputs."""

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch

        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        """Find the correct padding strategy."""
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
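# Padding usage sketch (illustrative; Wav2Vec2FeatureExtractor is one concrete subclass
# whose model_input_names[0] == "input_values" -- the exact output shapes are an assumption):
#   from transformers import Wav2Vec2FeatureExtractor
#   extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
#   batch = extractor.pad({"input_values": [[0.1, 0.2, 0.3], [0.4]]},
#                         padding=True, return_tensors="np")
#   # batch["input_values"] is padded to a common length; batch["attention_mask"]
#   # marks which entries are real samples.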
| 21 | 1 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 21 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1


class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)

    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))

    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
| 21 | 1 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 21 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = KandinskyInpaintPipeline
UpperCamelCase = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
UpperCamelCase = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
UpperCamelCase = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCamelCase = False
@property
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
return 32
@property
def A__ ( self :Optional[Any] ):
'''simple docstring'''
return 32
@property
def A__ ( self :List[Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def A__ ( self :Dict ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def A__ ( self :List[Any] ):
'''simple docstring'''
return 1_00
@property
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : Dict =XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def A__ ( self :str ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ : str =MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
__magic_name__ : Tuple =MultilingualCLIP(__snake_case )
__magic_name__ : Optional[int] =text_encoder.eval()
return text_encoder
@property
def A__ ( self :Dict ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ : Optional[Any] ={
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__magic_name__ : Union[str, Any] =UNetaDConditionModel(**__snake_case )
return model
@property
def A__ ( self :List[str] ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A__ ( self :Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ : Dict =VQModel(**self.dummy_movq_kwargs )
return model
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[str] =self.dummy_text_encoder
__magic_name__ : Optional[Any] =self.dummy_tokenizer
__magic_name__ : Optional[Any] =self.dummy_unet
__magic_name__ : Tuple =self.dummy_movq
__magic_name__ : List[str] =DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.00085 , beta_end=0.012 , clip_sample=__snake_case , set_alpha_to_one=__snake_case , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__snake_case , )
__magic_name__ : str ={
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def A__ ( self :str , __snake_case :Optional[Any] , __snake_case :int=0 ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__snake_case ) ).to(__snake_case )
__magic_name__ : Dict =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__snake_case )
# create init_image
__magic_name__ : str =floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
__magic_name__ : int =image.cpu().permute(0 , 2 , 3 , 1 )[0]
__magic_name__ : str =Image.fromarray(np.uint8(__snake_case ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create mask
__magic_name__ : Dict =np.ones((64, 64) , dtype=np.float32 )
__magic_name__ : Any =0
if str(__snake_case ).startswith("""mps""" ):
__magic_name__ : Dict =torch.manual_seed(__snake_case )
else:
__magic_name__ : Tuple =torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__magic_name__ : List[Any] ={
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Tuple ="""cpu"""
__magic_name__ : List[Any] =self.get_dummy_components()
__magic_name__ : Union[str, Any] =self.pipeline_class(**__snake_case )
__magic_name__ : Tuple =pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
__magic_name__ : Tuple =pipe(**self.get_dummy_inputs(__snake_case ) )
__magic_name__ : List[Any] =output.images
__magic_name__ : Any =pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
__magic_name__ : int =image[0, -3:, -3:, -1]
__magic_name__ : str =image_from_tuple[0, -3:, -3:, -1]
print(f"image.shape {image.shape}" )
assert image.shape == (1, 64, 64, 3)
__magic_name__ : Optional[Any] =np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def A__ ( self :Dict ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def A__ ( self :List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : List[str] =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
__magic_name__ : int =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__magic_name__ : List[Any] =np.ones((7_68, 7_68) , dtype=np.float32 )
__magic_name__ : Any =0
__magic_name__ : int ="""a hat"""
__magic_name__ : int =KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__snake_case )
__magic_name__ : Dict =KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
__magic_name__ : int =pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
__magic_name__ : Union[str, Any] =torch.Generator(device="""cpu""" ).manual_seed(0 )
__magic_name__ , __magic_name__ : Dict =pipe_prior(
__snake_case , generator=__snake_case , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
__magic_name__ : Optional[Any] =pipeline(
__snake_case , image=__snake_case , mask_image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , generator=__snake_case , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="""np""" , )
__magic_name__ : Optional[int] =output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
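# Pattern note (illustrative sketch, not part of the test file above): the dummy
# inputs seed a plain CPU generator when the device is "mps", since seeded
# torch.Generator objects on "mps" were historically unreliable, while other
# devices get a device-local generator for reproducibility:
#
# import torch
#
# def make_generator(device, seed: int):
#     if str(device).startswith("mps"):
#         return torch.manual_seed(seed)
#     return torch.Generator(device=device).manual_seed(seed)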
| 21 | 1 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f_caption, open(
        f"{class_data_dir}/urls.txt", "w"
    ) as f_url, open(f"{class_data_dir}/images.txt", "w") as f_path:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validate that the payload decodes as an image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f_caption.write(images["caption"] + "\n")
                    f_url.write(images["url"] + "\n")
                    f_path.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
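# Example invocation (hypothetical script name and paths): downloads up to
# --num_class_images LAION matches for the prompt into <class_data_dir>/images
# plus caption.txt / urls.txt / images.txt manifests:
#
#   python retrieve.py --class_prompt "photo of a dog" --class_data_dir ./class_dog --num_class_images 200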
| 21 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __A :
def __init__( self :int , __snake_case :List[Any] , __snake_case :List[Any]=2 , __snake_case :Dict=True , __snake_case :Tuple=False , __snake_case :List[str]=10 , __snake_case :List[str]=3 , __snake_case :Union[str, Any]=32 * 8 , __snake_case :Optional[int]=32 * 8 , __snake_case :Any=4 , __snake_case :Union[str, Any]=64 , ):
'''simple docstring'''
__magic_name__ : Optional[int] =parent
__magic_name__ : List[Any] =batch_size
__magic_name__ : List[str] =is_training
__magic_name__ : List[str] =use_auxiliary_loss
__magic_name__ : Union[str, Any] =num_queries
__magic_name__ : str =num_channels
__magic_name__ : Union[str, Any] =min_size
__magic_name__ : Union[str, Any] =max_size
__magic_name__ : Optional[int] =num_labels
__magic_name__ : Tuple =hidden_dim
__magic_name__ : Any =hidden_dim
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Optional[Any] =floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__snake_case )
__magic_name__ : List[Any] =torch.ones([self.batch_size, self.min_size, self.max_size] , device=__snake_case )
__magic_name__ : List[str] =(
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__snake_case ) > 0.5
).float()
__magic_name__ : Union[str, Any] =(torch.rand((self.batch_size, self.num_labels) , device=__snake_case ) > 0.5).long()
__magic_name__ : str =self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Dict =MaskaFormerConfig(
hidden_size=self.hidden_dim , )
__magic_name__ : str =self.num_queries
__magic_name__ : Dict =self.num_labels
__magic_name__ : int =[1, 1, 1, 1]
__magic_name__ : List[str] =self.num_channels
__magic_name__ : str =64
__magic_name__ : List[str] =1_28
__magic_name__ : Optional[Any] =self.hidden_dim
__magic_name__ : Tuple =self.hidden_dim
__magic_name__ : Optional[int] =self.hidden_dim
return config
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Tuple =self.prepare_config_and_inputs()
__magic_name__ : Optional[Any] ={"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def A__ ( self :Union[str, Any] , __snake_case :Tuple , __snake_case :Dict ):
'''simple docstring'''
__magic_name__ : int =output.encoder_hidden_states
__magic_name__ : List[str] =output.pixel_decoder_hidden_states
__magic_name__ : int =output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__snake_case ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__snake_case ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__snake_case ) , config.decoder_layers )
def A__ ( self :List[Any] , __snake_case :Optional[Any] , __snake_case :int , __snake_case :str , __snake_case :str=False ):
'''simple docstring'''
with torch.no_grad():
__magic_name__ : List[str] =MaskaFormerModel(config=__snake_case )
model.to(__snake_case )
model.eval()
__magic_name__ : Union[str, Any] =model(pixel_values=__snake_case , pixel_mask=__snake_case )
__magic_name__ : int =model(__snake_case , output_hidden_states=__snake_case )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__snake_case , __snake_case )
def A__ ( self :Optional[Any] , __snake_case :List[str] , __snake_case :List[Any] , __snake_case :int , __snake_case :Any , __snake_case :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : str =MaskaFormerForUniversalSegmentation(config=__snake_case )
model.to(__snake_case )
model.eval()
def comm_check_on_output(__snake_case :List[str] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__magic_name__ : int =model(pixel_values=__snake_case , pixel_mask=__snake_case )
__magic_name__ : List[str] =model(__snake_case )
comm_check_on_output(__snake_case )
__magic_name__ : Any =model(
pixel_values=__snake_case , pixel_mask=__snake_case , mask_labels=__snake_case , class_labels=__snake_case )
comm_check_on_output(__snake_case )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
UpperCamelCase = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def A__ ( self :str ):
'''simple docstring'''
__magic_name__ : Any =MaskaFormerModelTester(self )
__magic_name__ : Union[str, Any] =ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case )
def A__ ( self :Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ , __magic_name__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__snake_case , **__snake_case , output_hidden_states=__snake_case )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__snake_case )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def A__ ( self :List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def A__ ( self :Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def A__ ( self :int ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def A__ ( self :Tuple ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
pass
def A__ ( self :Optional[int] ):
'''simple docstring'''
__magic_name__ , __magic_name__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Tuple =model_class(__snake_case )
__magic_name__ : Optional[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Tuple =[*signature.parameters.keys()]
__magic_name__ : Optional[Any] =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __snake_case )
@slow
def A__ ( self :Tuple ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
__magic_name__ : int =MaskaFormerModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Any =(self.model_tester.min_size,) * 2
__magic_name__ : Union[str, Any] ={
"""pixel_values""": torch.randn((2, 3, *size) , device=__snake_case ),
"""mask_labels""": torch.randn((2, 10, *size) , device=__snake_case ),
"""class_labels""": torch.zeros(2 , 10 , device=__snake_case ).long(),
}
__magic_name__ : Optional[Any] =self.model_tester.get_config()
__magic_name__ : Dict =MaskaFormerForUniversalSegmentation(__snake_case ).to(__snake_case )
__magic_name__ : Any =model(**__snake_case )
self.assertTrue(outputs.loss is not None )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ , __magic_name__ : int =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__snake_case , **__snake_case , output_hidden_states=__snake_case )
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ , __magic_name__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : List[Any] =model_class(__snake_case ).to(__snake_case )
__magic_name__ : Optional[int] =model(**__snake_case , output_attentions=__snake_case )
self.assertTrue(outputs.attentions is not None )
def A__ ( self :int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__magic_name__ : List[Any] =self.all_model_classes[1]
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
__magic_name__ : Dict =model_class(__snake_case )
model.to(__snake_case )
model.train()
__magic_name__ : Optional[Any] =model(__snake_case , mask_labels=__snake_case , class_labels=__snake_case ).loss
loss.backward()
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ : List[str] =self.all_model_classes[1]
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : List[Any] =self.model_tester.prepare_config_and_inputs()
__magic_name__ : Tuple =True
__magic_name__ : Optional[int] =True
__magic_name__ : int =model_class(__snake_case ).to(__snake_case )
model.train()
__magic_name__ : List[Any] =model(__snake_case , mask_labels=__snake_case , class_labels=__snake_case )
__magic_name__ : Optional[int] =outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__magic_name__ : Union[str, Any] =outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
__magic_name__ : Union[str, Any] =outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__magic_name__ : Optional[int] =outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__snake_case )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
UpperCAmelCase_ : Dict = 1e-4
def lowerCAmelCase_ ( ):
__magic_name__ : Dict =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class __A ( unittest.TestCase ):
@cached_property
def A__ ( self :int ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def A__ ( self :int ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Optional[Any] =MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(__snake_case )
__magic_name__ : int =self.default_image_processor
__magic_name__ : List[Any] =prepare_img()
__magic_name__ : Any =image_processor(__snake_case , return_tensors="""pt""" ).to(__snake_case )
__magic_name__ : Dict =inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__snake_case , (1, 3, 3_84, 3_84) )
with torch.no_grad():
__magic_name__ : List[str] =model(**__snake_case )
__magic_name__ : Any =torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(__snake_case )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __snake_case , atol=__snake_case ) )
__magic_name__ : Dict =torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(__snake_case )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __snake_case , atol=__snake_case ) )
__magic_name__ : Any =torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(__snake_case )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __snake_case , atol=__snake_case ) )
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Tuple =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__snake_case ).eval()
__magic_name__ : Optional[int] =self.default_image_processor
__magic_name__ : Tuple =prepare_img()
__magic_name__ : List[Any] =image_processor(__snake_case , return_tensors="""pt""" ).to(__snake_case )
__magic_name__ : Union[str, Any] =inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__snake_case , (1, 3, 3_84, 3_84) )
with torch.no_grad():
__magic_name__ : str =model(**__snake_case )
# masks_queries_logits
__magic_name__ : List[Any] =outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__magic_name__ : List[Any] =[
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
__magic_name__ : Dict =torch.tensor(__snake_case ).to(__snake_case )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __snake_case , atol=__snake_case ) )
# class_queries_logits
__magic_name__ : Any =outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
__magic_name__ : int =torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __snake_case , atol=__snake_case ) )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__snake_case ).eval()
__magic_name__ : Any =self.default_image_processor
__magic_name__ : Union[str, Any] =image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.float32 ), np.zeros((3_84, 3_84) ).astype(np.float32 )] , return_tensors="""pt""" , )
__magic_name__ : str =inputs["""pixel_values"""].to(__snake_case )
__magic_name__ : Tuple =[el.to(__snake_case ) for el in inputs["""mask_labels"""]]
__magic_name__ : Union[str, Any] =[el.to(__snake_case ) for el in inputs["""class_labels"""]]
with torch.no_grad():
__magic_name__ : Dict =model(**__snake_case )
self.assertTrue(outputs.loss is not None )
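# Shape arithmetic behind the assertions above, in isolation (sketch; helper
# name is illustrative): inputs are padded so both spatial sides are divisible
# by 32, and the mask logits come out at 1/4 of that padded resolution.
def _expected_mask_logits_shape(batch: int, num_queries: int, height: int, width: int) -> tuple:
    assert height % 32 == 0 and width % 32 == 0
    return (batch, num_queries, height // 4, width // 4)


assert _expected_mask_logits_shape(1, 100, 384, 384) == (1, 100, 96, 96)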
| 21 | 1 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __A :
UpperCamelCase = 42
UpperCamelCase = None
# Automatically constructed
UpperCamelCase = "dict"
UpperCamelCase = None
UpperCamelCase = field(default="""Translation""" , init=UpperCamelCase__ , repr=UpperCamelCase__ )
def __call__( self :Union[str, Any] ):
'''simple docstring'''
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def A__ ( self :List[Any] ):
'''simple docstring'''
from .features import Value
return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class __A :
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
# Automatically constructed
UpperCamelCase = "dict"
UpperCamelCase = None
UpperCamelCase = field(default="""TranslationVariableLanguages""" , init=UpperCamelCase__ , repr=UpperCamelCase__ )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[Any] =sorted(set(self.languages ) ) if self.languages else None
__magic_name__ : Optional[int] =len(self.languages ) if self.languages else None
def __call__( self :List[str] ):
'''simple docstring'''
return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
def A__ ( self :str , __snake_case :str ):
'''simple docstring'''
__magic_name__ : Optional[int] =set(self.languages )
if self.languages and set(__snake_case ) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(__snake_case ) - lang_set ) )}) are not in valid set ({', '.join(__snake_case )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__magic_name__ : Any =[]
for lang, text in translation_dict.items():
if isinstance(__snake_case , __snake_case ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__magic_name__ , __magic_name__ : List[str] =zip(*sorted(__snake_case ) )
return {"language": languages, "translation": translations}
def A__ ( self :List[Any] ):
'''simple docstring'''
from .features import Sequence, Value
return {
"language": Sequence(Value("""string""" ) ),
"translation": Sequence(Value("""string""" ) ),
}
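# Illustrative sketch (helper name is hypothetical; it mirrors the flattening
# performed by the variable-language encoder above): multiple translations per
# language are split into (lang, text) pairs and re-sorted by language code
# before being regrouped.
def _flatten_translations(translation_dict: dict) -> dict:
    pairs = []
    for lang, text in translation_dict.items():
        if isinstance(text, str):
            pairs.append((lang, text))
        else:
            pairs.extend((lang, el) for el in text)
    languages, translations = zip(*sorted(pairs))
    return {"language": list(languages), "translation": list(translations)}


# _flatten_translations({"de": "Hallo", "en": ["Hello", "Hi"]})
# -> {"language": ["de", "en", "en"], "translation": ["Hallo", "Hello", "Hi"]}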
| 21 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __A ( UpperCamelCase__ ):
UpperCamelCase = """segformer"""
def __init__( self :List[str] , __snake_case :str=3 , __snake_case :Optional[Any]=4 , __snake_case :List[Any]=[2, 2, 2, 2] , __snake_case :Dict=[8, 4, 2, 1] , __snake_case :Optional[int]=[32, 64, 1_60, 2_56] , __snake_case :Union[str, Any]=[7, 3, 3, 3] , __snake_case :Optional[Any]=[4, 2, 2, 2] , __snake_case :Tuple=[1, 2, 5, 8] , __snake_case :List[Any]=[4, 4, 4, 4] , __snake_case :Optional[Any]="gelu" , __snake_case :Tuple=0.0 , __snake_case :Dict=0.0 , __snake_case :Optional[int]=0.1 , __snake_case :Optional[int]=0.02 , __snake_case :Tuple=0.1 , __snake_case :Union[str, Any]=1E-6 , __snake_case :int=2_56 , __snake_case :Optional[int]=2_55 , **__snake_case :Dict , ):
'''simple docstring'''
super().__init__(**__snake_case )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , __snake_case , )
__magic_name__ : Dict =num_channels
__magic_name__ : str =num_encoder_blocks
__magic_name__ : List[Any] =depths
__magic_name__ : Optional[Any] =sr_ratios
__magic_name__ : List[str] =hidden_sizes
__magic_name__ : List[str] =patch_sizes
__magic_name__ : Any =strides
__magic_name__ : Optional[Any] =mlp_ratios
__magic_name__ : str =num_attention_heads
__magic_name__ : int =hidden_act
__magic_name__ : List[Any] =hidden_dropout_prob
__magic_name__ : Optional[Any] =attention_probs_dropout_prob
__magic_name__ : Optional[Any] =classifier_dropout_prob
__magic_name__ : List[str] =initializer_range
__magic_name__ : List[str] =drop_path_rate
__magic_name__ : List[Any] =layer_norm_eps
__magic_name__ : List[str] =decoder_hidden_size
__magic_name__ : Union[str, Any] =kwargs.get("""reshape_last_stage""" , __snake_case )
__magic_name__ : Dict =semantic_loss_ignore_index
class __A ( UpperCamelCase__ ):
UpperCamelCase = version.parse("""1.11""" )
@property
def A__ ( self :List[str] ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def A__ ( self :Any ):
'''simple docstring'''
return 1E-4
@property
def A__ ( self :int ):
'''simple docstring'''
return 12
| 21 | 1 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __A ( UpperCamelCase__ ):
UpperCamelCase = (IPNDMScheduler,)
UpperCamelCase = (("""num_inference_steps""", 50),)
def A__ ( self :int , **__snake_case :List[str] ):
'''simple docstring'''
__magic_name__ : Optional[int] ={"""num_train_timesteps""": 10_00}
config.update(**__snake_case )
return config
def A__ ( self :int , __snake_case :Any=0 , **__snake_case :List[str] ):
'''simple docstring'''
__magic_name__ : Optional[int] =dict(self.forward_default_kwargs )
__magic_name__ : List[str] =kwargs.pop("""num_inference_steps""" , __snake_case )
__magic_name__ : str =self.dummy_sample
__magic_name__ : Dict =0.1 * sample
__magic_name__ : Any =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__magic_name__ : int =self.get_scheduler_config(**__snake_case )
__magic_name__ : Optional[int] =scheduler_class(**__snake_case )
scheduler.set_timesteps(__snake_case )
# copy over dummy past residuals
__magic_name__ : List[str] =dummy_past_residuals[:]
if time_step is None:
__magic_name__ : Dict =scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__snake_case )
__magic_name__ : int =scheduler_class.from_pretrained(__snake_case )
new_scheduler.set_timesteps(__snake_case )
# copy over dummy past residuals
__magic_name__ : Tuple =dummy_past_residuals[:]
__magic_name__ : Optional[int] =scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
__magic_name__ : List[str] =new_scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__magic_name__ : Union[str, Any] =scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
__magic_name__ : List[str] =new_scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def A__ ( self :List[Any] ):
'''simple docstring'''
pass
def A__ ( self :Optional[int] , __snake_case :Optional[Any]=0 , **__snake_case :Dict ):
'''simple docstring'''
__magic_name__ : int =dict(self.forward_default_kwargs )
__magic_name__ : Dict =kwargs.pop("""num_inference_steps""" , __snake_case )
__magic_name__ : str =self.dummy_sample
__magic_name__ : Optional[Any] =0.1 * sample
__magic_name__ : Union[str, Any] =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__magic_name__ : Dict =self.get_scheduler_config()
__magic_name__ : Optional[Any] =scheduler_class(**__snake_case )
scheduler.set_timesteps(__snake_case )
# copy over dummy past residuals (must be after setting timesteps)
__magic_name__ : Union[str, Any] =dummy_past_residuals[:]
if time_step is None:
__magic_name__ : Optional[Any] =scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__snake_case )
__magic_name__ : List[Any] =scheduler_class.from_pretrained(__snake_case )
# copy over dummy past residuals
new_scheduler.set_timesteps(__snake_case )
# copy over dummy past residual (must be after setting timesteps)
__magic_name__ : Tuple =dummy_past_residuals[:]
__magic_name__ : Union[str, Any] =scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
__magic_name__ : Union[str, Any] =new_scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__magic_name__ : Tuple =scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
__magic_name__ : Optional[Any] =new_scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def A__ ( self :Optional[Any] , **__snake_case :List[Any] ):
'''simple docstring'''
__magic_name__ : List[Any] =self.scheduler_classes[0]
__magic_name__ : List[str] =self.get_scheduler_config(**__snake_case )
__magic_name__ : Tuple =scheduler_class(**__snake_case )
__magic_name__ : List[str] =10
__magic_name__ : Optional[Any] =self.dummy_model()
__magic_name__ : Any =self.dummy_sample_deter
scheduler.set_timesteps(__snake_case )
for i, t in enumerate(scheduler.timesteps ):
__magic_name__ : Union[str, Any] =model(__snake_case , __snake_case )
__magic_name__ : Union[str, Any] =scheduler.step(__snake_case , __snake_case , __snake_case ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
__magic_name__ : Optional[Any] =model(__snake_case , __snake_case )
__magic_name__ : Optional[Any] =scheduler.step(__snake_case , __snake_case , __snake_case ).prev_sample
return sample
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : Optional[int] =dict(self.forward_default_kwargs )
__magic_name__ : str =kwargs.pop("""num_inference_steps""" , __snake_case )
for scheduler_class in self.scheduler_classes:
__magic_name__ : Union[str, Any] =self.get_scheduler_config()
__magic_name__ : Optional[int] =scheduler_class(**__snake_case )
__magic_name__ : Tuple =self.dummy_sample
__magic_name__ : Tuple =0.1 * sample
if num_inference_steps is not None and hasattr(__snake_case , """set_timesteps""" ):
scheduler.set_timesteps(__snake_case )
elif num_inference_steps is not None and not hasattr(__snake_case , """set_timesteps""" ):
__magic_name__ : Dict =num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__magic_name__ : List[str] =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
__magic_name__ : str =dummy_past_residuals[:]
__magic_name__ : Optional[Any] =scheduler.timesteps[5]
__magic_name__ : List[str] =scheduler.timesteps[6]
__magic_name__ : Union[str, Any] =scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
__magic_name__ : Tuple =scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
__magic_name__ : Optional[Any] =scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
__magic_name__ : Union[str, Any] =scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def A__ ( self :Tuple ):
'''simple docstring'''
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__snake_case , time_step=__snake_case )
def A__ ( self :List[str] ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=__snake_case , time_step=__snake_case )
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Optional[Any] =self.full_loop()
__magic_name__ : str =torch.mean(torch.abs(__snake_case ) )
assert abs(result_mean.item() - 2_54_05_29 ) < 10
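# The save/restore pattern exercised repeatedly above, in isolation (sketch;
# IPNDMScheduler, save_config and from_pretrained are the diffusers APIs the
# test imports):
#
# import tempfile
# from diffusers import IPNDMScheduler
#
# scheduler = IPNDMScheduler(num_train_timesteps=1000)
# with tempfile.TemporaryDirectory() as tmpdirname:
#     scheduler.save_config(tmpdirname)
#     restored = IPNDMScheduler.from_pretrained(tmpdirname)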
| 21 |
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    # queue used as a max-priority queue of [negated degree, (vertex, adjacency)]
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1

        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
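# Note on the heap trick above: heapq implements a min-heap, so pushing
# [-1 * len(adjacency), ...] turns it into a cheap max-priority queue keyed on
# vertex degree. Minimal illustration:
#
# import heapq
# q = []
# for node, adj in {0: [1, 2, 3], 1: [0], 2: [0], 3: [0]}.items():
#     heapq.heappush(q, [-len(adj), node])
# assert heapq.heappop(q) == [-3, 0]  # the hub vertex 0 surfaces first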
| 21 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase_ : str = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Union[str, Any] = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
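# The _LazyModule indirection above defers the heavy framework imports until an
# attribute is first accessed. A minimal sketch of the same idea via a
# module-level __getattr__ (PEP 562); names below are illustrative, not the
# actual transformers implementation:
#
# import importlib
#
# _LAZY = {"Wav2Vec2Model": ".modeling_wav2vec2"}
#
# def __getattr__(name):
#     if name in _LAZY:
#         module = importlib.import_module(_LAZY[name], __package__)
#         return getattr(module, name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")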
| 21 |
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    # ds_b -> digitsum(b) where the current term is written as b * 10^k + c
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)

        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
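# Cross-check sketch (helper added for illustration): the jump/memo machinery
# above just skips ahead in the sequence a(1) = 1, a(i + 1) = a(i) +
# digitsum(a(i)). A brute-force reference for small n:
def _brute_force_a(n: int) -> int:
    value = 1
    for _ in range(n - 1):
        value += sum(int(digit) for digit in str(value))
    return value


assert _brute_force_a(6) == 23  # the sequence starts 1, 2, 4, 8, 16, 23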
| 21 | 1 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(f"{solution() = }")
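# Roundtrip sketch (illustrative helper, not part of the solution above): XOR
# with a cycled key is its own inverse, which is why try_key can recover the
# plaintext by XOR-ing the ciphertext just once.
from itertools import cycle as _cycle  # local alias keeps the sketch self-contained


def _xor_with_key(data: list[int], key: list[int]) -> list[int]:
    return [byte ^ keybyte for byte, keybyte in zip(data, _cycle(key))]


_plain = [ord(c) for c in "the secret"]
_key = [ord(c) for c in "abc"]
assert _xor_with_key(_xor_with_key(_plain, _key), _key) == _plain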
| 21 |
from typing import List

from .keymap import KEYMAP, get_character


def mark(key: str):
    # Mark the function with a single key code so the register can pick it up
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    # Mark the function with several key codes so the register can pick them up
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        # Find and invoke the handler registered for the pressed character, if any
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    # Rebuild the class through the KeyHandler metaclass
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
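# Usage sketch for the helpers above (illustrative; the package-relative
# .keymap import means this only runs inside the real package, so it is shown
# as comments):
#
# @register
# class Menu:
#     @mark("j")
#     def move_down(cls):
#         ...
#
#     @mark_multiple("k", "q")
#     def move_up_or_quit(cls):
#         ...
#
# Menu.handle_input()  # reads one key and dispatches to the marked handler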
| 21 | 1 |
from ..utils import DummyObject, requires_backends
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :int , *__snake_case :Tuple , **__snake_case :Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :int , *__snake_case :Dict , **__snake_case :List[str] ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Optional[Any] , *__snake_case :List[Any] , **__snake_case :Dict ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Dict , *__snake_case :Optional[Any] , **__snake_case :Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :List[str] , *__snake_case :str , **__snake_case :Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Dict , *__snake_case :List[Any] , **__snake_case :Tuple ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Tuple , *__snake_case :Any , **__snake_case :Optional[int] ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Union[str, Any] , *__snake_case :List[str] , **__snake_case :List[str] ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :List[str] , *__snake_case :Dict , **__snake_case :List[Any] ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Union[str, Any] , *__snake_case :List[Any] , **__snake_case :Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :int , *__snake_case :int , **__snake_case :Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Dict , *__snake_case :List[Any] , **__snake_case :Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Any , *__snake_case :List[str] , **__snake_case :List[Any] ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Dict , *__snake_case :Tuple , **__snake_case :Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :List[Any] , *__snake_case :Optional[int] , **__snake_case :int ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :List[str] , *__snake_case :int , **__snake_case :str ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Optional[Any] , *__snake_case :Dict , **__snake_case :int ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :str , *__snake_case :Optional[int] , **__snake_case :List[Any] ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Optional[Any] , *__snake_case :Tuple , **__snake_case :List[Any] ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Optional[int] , *__snake_case :List[Any] , **__snake_case :List[str] ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :List[Any] , *__snake_case :Any , **__snake_case :List[Any] ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Optional[Any] , *__snake_case :Any , **__snake_case :str ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Dict , *__snake_case :Tuple , **__snake_case :Dict ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :List[Any] , *__snake_case :Optional[Any] , **__snake_case :str ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :List[Any] , *__snake_case :List[str] , **__snake_case :List[str] ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Dict , *__snake_case :Dict , **__snake_case :str ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Tuple , *__snake_case :Optional[int] , **__snake_case :str ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :List[Any] , *__snake_case :Dict , **__snake_case :List[Any] ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Dict , *__snake_case :Any , **__snake_case :Dict ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Union[str, Any] , *__snake_case :Dict , **__snake_case :Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=UpperCamelCase__ ):
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Union[str, Any] , *__snake_case :Union[str, Any] , **__snake_case :List[Any] ):
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
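# Each class above is a placeholder built on the DummyObject metaclass: it
# exists only to raise a helpful ImportError when the "sentencepiece" backend
# is missing. A standalone sketch of the same guard (names here are
# illustrative, not the library's API):
def _requires_backends_sketch(obj, backends, available=()):
    # Raise if any required backend is not in the set of available ones.
    missing = [b for b in backends if b not in available]
    if missing:
        raise ImportError(
            f"{type(obj).__name__} requires the following backends: {', '.join(missing)}"
        )
# _requires_backends_sketch(object(), ["sentencepiece"])  # raises ImportError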
| 21 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a
__magic_name__ : str ={"""id""": example["""id"""]}
__magic_name__ : List[Any] =example["""annotations"""]
__magic_name__ : List[str] =annotation["""yes_no_answer"""]
if 0 in yes_no_answer or 1 in yes_no_answer:
__magic_name__ : Optional[int] =["""yes"""] if 1 in yes_no_answer else ["""no"""]
__magic_name__ : List[str] =[]
__magic_name__ : Dict =[]
__magic_name__ : str =["""<cls>"""]
else:
__magic_name__ : Tuple =["""short"""]
__magic_name__ : Optional[int] =choose_first(annotation["""short_answers"""] )
if len(out["""start_token"""] ) == 0:
# answer will be long if short is not available
__magic_name__ : Tuple =["""long"""]
__magic_name__ : Tuple =choose_first(annotation["""long_answer"""] , is_long_answer=lowerCamelCase )
__magic_name__ : List[Any] =[]
answer.update(lowerCamelCase )
# disregard some samples
if len(answer["""start_token"""] ) > 1 or answer["start_token"] == answer["end_token"]:
__magic_name__ : Any =True
else:
__magic_name__ : List[str] =False
__magic_name__ : int =["""start_token""", """end_token""", """start_byte""", """end_byte""", """text"""]
if not all(isinstance(answer[k] , lowerCamelCase ) for k in cols ):
raise ValueError("""Issue in ID""" , example["""id"""] )
return answer
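# Shape sketch (illustrative values, not taken from a real record): for an
# annotation whose yes_no_answer is [1], _get_single_answer returns roughly
# {"id": "...", "category": ["yes"], "start_token": [], "end_token": [],
#  "start_byte": [], "end_byte": [], "text": ["<cls>"], "remove_it": True}.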
def get_context_and_ans(example, assertion=False):
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]
    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }
    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }
    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10
    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]
    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])
    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")
    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
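# Index-shifting sketch: if doc["token"] is ["<p>", "Hello", "world", "</p>"]
# with is_html [True, False, False, True] and the answer starts at token 1,
# dropping the HTML tokens shifts the answer start to 0 in the cleaned context.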
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    # overlap will be of doc_stride - q_len
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]
    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }
    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1
    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break
        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }
    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )
    answer["start_token"] += q_len
    answer["end_token"] += q_len
    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1
    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]
    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")
    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }
    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"
        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]
        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break
    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
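# Window arithmetic sketch (illustrative numbers): with q_len == 10,
# max_length == 4096 and doc_stride == 2048, document windows start at
# range(10, len(input_ids), 4096 - 2048); for 5000 input ids that is
# [10, 2058, 4106], and consecutive strided chunks overlap by
# doc_stride - q_len document tokens, as the comment at the top of the
# function notes.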
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # skip samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop roughly 60% of the null-category samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
UpperCAmelCase_ : Optional[int] = load_dataset("natural_questions")
UpperCAmelCase_ : Optional[int] = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
UpperCAmelCase_ : str = data["train" if PROCESS_TRAIN == "true" else "validation"]
UpperCAmelCase_ : Optional[int] = {
"tokenizer": tokenizer,
"doc_stride": DOC_STRIDE,
"max_length": MAX_LENGTH,
"assertion": False,
}
UpperCAmelCase_ : int = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
UpperCAmelCase_ : Optional[Any] = data.remove_columns(["annotations", "document", "id", "question"])
print(data)
np.random.seed(SEED)
UpperCAmelCase_ : int = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
save_to_disk(data, file_name=cache_file_name)
| 21 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
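# Usage sketch: with the _LazyModule replacement above, an import such as
# `from transformers.models.trocr import TrOCRConfig` resolves the attribute
# lazily, so the heavy modeling submodule is only imported on first access
# (assuming this file lives at transformers/models/trocr/__init__.py).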
| 21 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
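# Usage sketch (hypothetical values, not from the source):
# config = XLMRobertaXLConfig(vocab_size=1000, hidden_size=64)
# assert config.model_type == "xlm-roberta-xl"
# For ONNX export, `inputs` above declares batch/sequence (and, for the
# multiple-choice task, choice) as dynamic axes of input_ids and attention_mask.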
| 21 | 1 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( ):
__magic_name__ : Dict =10
__magic_name__ : str =datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
__magic_name__ : Any =datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [97], """text""": ["""1976"""]}] * 10,
"""id""": list(range(lowerCamelCase ) ),
} , features=lowerCamelCase , )
return dataset
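# Shape sketch: the fixture above yields 10 identical rows, so dataset[0]
# looks roughly like {"tokens": ["foo"] * 5, "labels": [1] * 5,
# "answers": {"text": ["1976"], "answer_start": [97]}, "id": 0}
# (a Sequence-of-dict feature is stored as a dict of lists).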
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
__magic_name__ : Optional[int] =str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=lowerCamelCase )
return filename
# FILE_CONTENT + files
UpperCAmelCase_ : str = "\\n Text data.\n Second line of data."
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Optional[Any] =tmp_path_factory.mktemp("""data""" ) / """file.txt"""
__magic_name__ : List[str] =FILE_CONTENT
with open(lowerCamelCase , """w""" ) as f:
f.write(lowerCamelCase )
return filename
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase ):
    import bz2
__magic_name__ : int =tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
__magic_name__ : int =bytes(lowerCamelCase , """utf-8""" )
    with bz2.open(lowerCamelCase , """wb""" ) as f:
f.write(lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase ):
import gzip
__magic_name__ : Tuple =str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
__magic_name__ : int =bytes(lowerCamelCase , """utf-8""" )
with gzip.open(lowerCamelCase , """wb""" ) as f:
f.write(lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase ):
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
__magic_name__ : Dict =tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
__magic_name__ : List[Any] =bytes(lowerCamelCase , """utf-8""" )
        with lz4.frame.open(lowerCamelCase , """wb""" ) as f:
f.write(lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
__magic_name__ : List[Any] =tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
        with py7zr.SevenZipFile(lowerCamelCase , """w""" ) as archive:
archive.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
import tarfile
__magic_name__ : Optional[int] =tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(lowerCamelCase , """w""" ) as f:
f.add(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase ):
import lzma
__magic_name__ : Union[str, Any] =tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
__magic_name__ : Dict =bytes(lowerCamelCase , """utf-8""" )
with lzma.open(lowerCamelCase , """wb""" ) as f:
f.write(lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
import zipfile
__magic_name__ : str =tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(lowerCamelCase , """w""" ) as f:
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase ):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__magic_name__ : Optional[int] =tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
__magic_name__ : int =bytes(lowerCamelCase , """utf-8""" )
with zstd.open(lowerCamelCase , """wb""" ) as f:
f.write(lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Optional[int] =tmp_path_factory.mktemp("""data""" ) / """file.xml"""
__magic_name__ : Union[str, Any] =textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(lowerCamelCase , """w""" ) as f:
f.write(lowerCamelCase )
return filename
UpperCAmelCase_ : List[Any] = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
UpperCAmelCase_ : str = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
UpperCAmelCase_ : int = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
UpperCAmelCase_ : Any = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
UpperCAmelCase_ : List[Any] = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( ):
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : str =datasets.Dataset.from_dict(lowerCamelCase )
__magic_name__ : Optional[int] =str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : List[str] =str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
    with contextlib.closing(sqlite3.connect(lowerCamelCase ) ) as con:
__magic_name__ : List[Any] =con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Optional[int] =str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(lowerCamelCase , """w""" , newline="""""" ) as f:
__magic_name__ : Union[str, Any] =csv.DictWriter(lowerCamelCase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Optional[Any] =str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(lowerCamelCase , """w""" , newline="""""" ) as f:
__magic_name__ : Optional[Any] =csv.DictWriter(lowerCamelCase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
    import bz2
__magic_name__ : Optional[int] =tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(lowerCamelCase , """rb""" ) as f:
__magic_name__ : List[Any] =f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(lowerCamelCase , """wb""" ) as f:
f.write(lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : Tuple =tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowerCamelCase , """w""" ) as f:
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : List[str] =tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowerCamelCase , """w""" ) as f:
f.write(lowerCamelCase , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(lowerCamelCase , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : List[str] =tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(lowerCamelCase , """w""" ) as f:
f.write(lowerCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase ) ) )
f.write(lowerCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Any =str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
__magic_name__ : Optional[int] =pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(lowerCamelCase , """wb""" ) as f:
__magic_name__ : str =pq.ParquetWriter(lowerCamelCase , schema=lowerCamelCase )
__magic_name__ : Optional[int] =pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowerCamelCase ) )] for k in DATA[0]} , schema=lowerCamelCase )
writer.write_table(lowerCamelCase )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Any =str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
__magic_name__ : Optional[Any] ={"""data""": DATA}
with open(lowerCamelCase , """w""" ) as f:
json.dump(lowerCamelCase , lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Optional[int] =str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
__magic_name__ : int ={"""data""": DATA_DICT_OF_LISTS}
with open(lowerCamelCase , """w""" ) as f:
json.dump(lowerCamelCase , lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Any =str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(lowerCamelCase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : int =str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(lowerCamelCase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Tuple =str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(lowerCamelCase , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(lowerCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : List[Any] =str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(lowerCamelCase , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(lowerCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
import gzip
__magic_name__ : List[str] =str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(lowerCamelCase , """rb""" ) as orig_file:
with gzip.open(lowerCamelCase , """wb""" ) as zipped_file:
zipped_file.writelines(lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
import gzip
__magic_name__ : str =str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(lowerCamelCase , """rb""" ) as orig_file:
with gzip.open(lowerCamelCase , """wb""" ) as zipped_file:
zipped_file.writelines(lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : List[str] =tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(lowerCamelCase , """w""" ) as f:
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : Optional[int] =tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(lowerCamelCase , """w""" ) as f:
f.write(lowerCamelCase , arcname=os.path.join("""nested""" , os.path.basename(lowerCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : List[Any] =tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(lowerCamelCase , """w""" ) as f:
f.write(lowerCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase ) ) )
f.write(lowerCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : Optional[Any] =tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(lowerCamelCase , """w""" ) as f:
f.add(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
f.add(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : List[Any] =tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(lowerCamelCase , """w""" ) as f:
f.add(lowerCamelCase , arcname=os.path.join("""nested""" , os.path.basename(lowerCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Tuple =["""0""", """1""", """2""", """3"""]
__magic_name__ : Union[str, Any] =str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(lowerCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Tuple =["""0""", """1""", """2""", """3"""]
__magic_name__ : Any =str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(lowerCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : List[Any] =["""0""", """1""", """2""", """3"""]
__magic_name__ : Union[str, Any] =tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(lowerCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : Any =tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(lowerCamelCase , """w""" ) as f:
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : str =tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(lowerCamelCase , """w""" ) as f:
f.write(lowerCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase ) ) )
f.write(lowerCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : str =tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(lowerCamelCase , """w""" ) as f:
f.write(lowerCamelCase , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(lowerCamelCase , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Any ="""\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
__magic_name__ : List[Any] =str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( ):
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( ):
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
__magic_name__ : Union[str, Any] =tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(lowerCamelCase , """w""" ) as f:
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Dict =tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
return data_dir
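# Usage sketch (assumption: a test module collected alongside this conftest;
# the fixture name below follows the upstream `datasets` test suite and is
# not visible in the obfuscated definitions above). pytest injects fixtures
# by parameter name, e.g.:
# def test_reads_text(text_file):
#     assert open(text_file).read().startswith("Text data.")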
| 21 |
from pathlib import Path
import fire
from tqdm import tqdm
def lowerCAmelCase_ ( lowerCamelCase="ro" , lowerCamelCase="en" , lowerCamelCase="wmt16" , lowerCamelCase=None ):
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("""run pip install datasets""" )
__magic_name__ : Dict =F"{src_lang}-{tgt_lang}"
print(F"Converting {dataset}-{pair}" )
__magic_name__ : Dict =datasets.load_dataset(lowerCamelCase , lowerCamelCase )
if save_dir is None:
__magic_name__ : Optional[int] =F"{dataset}-{pair}"
__magic_name__ : int =Path(lowerCamelCase )
save_dir.mkdir(exist_ok=lowerCamelCase )
for split in ds.keys():
print(F"Splitting {split} with {ds[split].num_rows} records" )
# to save to val.source, val.target like summary datasets
__magic_name__ : Dict ="""val""" if split == """validation""" else split
__magic_name__ : List[Any] =save_dir.joinpath(F"{fn}.source" )
__magic_name__ : Optional[int] =save_dir.joinpath(F"{fn}.target" )
__magic_name__ : Optional[Any] =src_path.open("""w+""" )
__magic_name__ : List[Any] =tgt_path.open("""w+""" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__magic_name__ : str =x["""translation"""]
src_fp.write(ex[src_lang] + """\n""" )
tgt_fp.write(ex[tgt_lang] + """\n""" )
print(F"Saved {dataset} dataset to {save_dir}" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
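# CLI sketch (via fire.Fire; flag spelling is fire's default mapping of the
# function's keyword arguments):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16
# writes {train,val,test}.{source,target} under ./wmt16-ro-en by default.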
| 21 | 1 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_try_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_try_incompatible_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_try_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_try_incompatible_extension_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            _ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])
def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
    """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.int64()}, {"""col_2""": pa.int64(), """col_1""": pa.string()}] )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
__magic_name__ : Tuple =pa.BufferOutputStream()
__magic_name__ : Any =pa.schema(lowerCamelCase ) if fields else None
with ArrowWriter(stream=lowerCamelCase , schema=lowerCamelCase , writer_batch_size=lowerCamelCase ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
__magic_name__ , __magic_name__ : List[Any] =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
__magic_name__ : int ={"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(lowerCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def lowerCAmelCase_ ( ):
__magic_name__ : int =pa.BufferOutputStream()
__magic_name__ : int =Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} )
with ArrowWriter(stream=lowerCamelCase , features=lowerCamelCase ) as writer:
writer.write({"""labels""": 0} )
writer.write({"""labels""": 1} )
__magic_name__ , __magic_name__ : Union[str, Any] =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
__magic_name__ : List[Any] =pa.BufferReader(output.getvalue() )
__magic_name__ : Any =pa.ipc.open_stream(lowerCamelCase )
__magic_name__ : pa.Table =f.read_all()
__magic_name__ : Optional[int] =pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(lowerCamelCase )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Optional[Any] =pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCamelCase , writer_batch_size=lowerCamelCase , hash_salt="""split_name""" , check_duplicates=lowerCamelCase , ) as writer:
with pytest.raises(lowerCamelCase ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] )
__magic_name__ , __magic_name__ : List[str] =writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : List[str] =pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCamelCase , writer_batch_size=lowerCamelCase , hash_salt="""split_name""" , check_duplicates=lowerCamelCase , ) as writer:
with pytest.raises(lowerCamelCase ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=10 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=10 )
__magic_name__ , __magic_name__ : Optional[int] =writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : str =pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCamelCase , writer_batch_size=lowerCamelCase , hash_salt="""split_name""" , check_duplicates=lowerCamelCase , ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
__magic_name__ , __magic_name__ : Optional[Any] =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
    """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.int64()}, {"""col_2""": pa.int64(), """col_1""": pa.string()}] )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
__magic_name__ : str =pa.BufferOutputStream()
__magic_name__ : Union[str, Any] =pa.schema(lowerCamelCase ) if fields else None
with ArrowWriter(stream=lowerCamelCase , schema=lowerCamelCase , writer_batch_size=lowerCamelCase ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
__magic_name__ , __magic_name__ : List[str] =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
__magic_name__ : Union[str, Any] ={"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(lowerCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
    """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.int64()}, {"""col_2""": pa.int64(), """col_1""": pa.string()}] )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
__magic_name__ : Optional[int] =pa.BufferOutputStream()
__magic_name__ : Any =pa.schema(lowerCamelCase ) if fields else None
with ArrowWriter(stream=lowerCamelCase , schema=lowerCamelCase , writer_batch_size=lowerCamelCase ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
__magic_name__ , __magic_name__ : List[Any] =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
__magic_name__ : int ={"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(lowerCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
    """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.int64()}, {"""col_2""": pa.int64(), """col_1""": pa.string()}] )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
__magic_name__ : Union[str, Any] =pa.BufferOutputStream()
__magic_name__ : Union[str, Any] =pa.schema(lowerCamelCase ) if fields else None
with ArrowWriter(stream=lowerCamelCase , schema=lowerCamelCase , writer_batch_size=lowerCamelCase ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
__magic_name__ , __magic_name__ : Dict =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
__magic_name__ : int ={"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(lowerCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)
def get_base_dtype(arr_type):
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
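# Example: change_first_primitive_element_in_list mutates the innermost first
# element in place, e.g.
# lst = [[1, 2], [3]]; change_first_primitive_element_in_list(lst, 99)
# leaves lst == [[99, 2], [3]].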
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : Optional[Any] =pa.array(TypedSequence(lowerCamelCase , optimized_int_type=lowerCamelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
    """col, expected_dtype""" , [
        ("""attention_mask""", pa.int8()),
        ("""special_tokens_mask""", pa.int8()),
        ("""token_type_ids""", pa.int8()),
        ("""input_ids""", pa.int32()),
        ("""other""", pa.int64()),
    ] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
# in range
__magic_name__ : int =pa.array(OptimizedTypedSequence(lowerCamelCase , col=lowerCamelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
__magic_name__ : List[str] =copy.deepcopy(lowerCamelCase )
__magic_name__ : Optional[Any] =np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(lowerCamelCase , lowerCamelCase )
__magic_name__ : Optional[int] =pa.array(OptimizedTypedSequence(lowerCamelCase , col=lowerCamelCase ) )
        assert get_base_dtype(arr.type) == pa.int64()
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
__magic_name__ : Optional[Any] =str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=lowerCamelCase ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Dict ="""mock://dataset-train.arrow"""
with ArrowWriter(path=lowerCamelCase , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(lowerCamelCase ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
__magic_name__ , __magic_name__ : str =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(lowerCamelCase )
def lowerCAmelCase_ ( ):
__magic_name__ : int =pa.BufferOutputStream()
with ParquetWriter(stream=lowerCamelCase ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
__magic_name__ , __magic_name__ : List[Any] =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
__magic_name__ : str =pa.BufferReader(output.getvalue() )
__magic_name__ : pa.Table =pq.read_table(lowerCamelCase )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("""embed_local_files""" , [False, True] )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
import PIL.Image
__magic_name__ : List[Any] =str(tmp_path / """test_image_rgb.jpg""" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(lowerCamelCase , format="""png""" )
__magic_name__ : Tuple =pa.BufferOutputStream()
with ParquetWriter(
stream=lowerCamelCase , features=Features({"""image""": Image()} ) , embed_local_files=lowerCamelCase ) as writer:
writer.write({"""image""": image_path} )
writer.finalize()
__magic_name__ : Dict =pa.BufferReader(output.getvalue() )
__magic_name__ : pa.Table =pq.read_table(lowerCamelCase )
__magic_name__ : str =pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["""image"""][0]["""path"""] , lowerCamelCase )
with open(lowerCamelCase , """rb""" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def lowerCAmelCase_ ( ):
__magic_name__ : List[str] =pa.schema([pa.field("""col_1""" , pa.string() , nullable=lowerCamelCase )] )
__magic_name__ : str =pa.BufferOutputStream()
with ArrowWriter(stream=lowerCamelCase ) as writer:
writer._build_writer(inferred_schema=lowerCamelCase )
assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
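# Note on writer_batch_size: with writer_batch_size=1 every write is flushed
# as its own record batch, which is why _check_output expects
# expected_num_chunks == num_examples in that case and a single chunk otherwise.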
| 21 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
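# Worked example: 49/98 is digit-cancelling because crossing out the 9s gives
# 4/8 == 49/98. For two-digit fractions the four non-trivial cases are
# 16/64, 19/95, 26/65 and 49/98; their product is 1/100, so solution() == 100.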
if __name__ == "__main__":
print(solution())
| 21 | 1 |
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
_validate_not_empty(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
_validate_lists(lowerCamelCase , lowerCamelCase )
_validate_dicts(
lowerCamelCase , lowerCamelCase , lowerCamelCase )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("""There's an empty parameter""" )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
_validate_list(lowerCamelCase , """observations_space""" )
_validate_list(lowerCamelCase , """states_space""" )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
if not isinstance(_object , lowerCamelCase ):
__magic_name__ : Any =F"{var_name} must be a list"
raise ValueError(lowerCamelCase )
else:
for x in _object:
if not isinstance(lowerCamelCase , lowerCamelCase ):
__magic_name__ : List[str] =F"{var_name} must be a list of strings"
raise ValueError(lowerCamelCase )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
_validate_dict(lowerCamelCase , """initial_probabilities""" , lowerCamelCase )
_validate_nested_dict(lowerCamelCase , """transition_probabilities""" )
_validate_nested_dict(lowerCamelCase , """emission_probabilities""" )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
_validate_dict(_object , lowerCamelCase , lowerCamelCase )
for x in _object.values():
_validate_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False ):
if not isinstance(_object , lowerCamelCase ):
__magic_name__ : int =F"{var_name} must be a dict"
raise ValueError(lowerCamelCase )
if not all(isinstance(lowerCamelCase , lowerCamelCase ) for x in _object ):
__magic_name__ : Tuple =F"{var_name} all keys must be strings"
raise ValueError(lowerCamelCase )
if not all(isinstance(lowerCamelCase , lowerCamelCase ) for x in _object.values() ):
__magic_name__ : Tuple ="""nested dictionary """ if nested else """"""
__magic_name__ : Any =F"{var_name} {nested_text}all values must be {value_type.__name__}"
raise ValueError(lowerCamelCase )
if __name__ == "__main__":
from doctest import testmod
testmod()
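# A compact, runnable sketch of the Viterbi recurrence implemented above, with
# the masked identifiers replaced by descriptive names; the toy HMM (weather
# states emitting activities) is illustrative only:
def viterbi_sketch(observations, states, start_p, trans_p, emit_p):
    # Keys are (state, observation) pairs, so -- like the implementation
    # above -- this sketch assumes the observation values are distinct.
    prob, back = {}, {}
    for s in states:
        prob[(s, observations[0])] = start_p[s] * emit_p[s][observations[0]]
        back[(s, observations[0])] = None
    for prev_obs, obs in zip(observations, observations[1:]):
        for s in states:
            best = max(states, key=lambda k: prob[(k, prev_obs)] * trans_p[k][s])
            prob[(s, obs)] = prob[(best, prev_obs)] * trans_p[best][s] * emit_p[s][obs]
            back[(s, obs)] = best
    last = max(states, key=lambda k: prob[(k, observations[-1])])
    path = [last]
    for obs in reversed(observations[1:]):
        path.append(back[(path[-1], obs)])
    return path[::-1]

states = ("Rainy", "Sunny")
observations = ("walk", "shop", "clean")
start_p = {"Rainy": 0.6, "Sunny": 0.4}
trans_p = {"Rainy": {"Rainy": 0.7, "Sunny": 0.3}, "Sunny": {"Rainy": 0.4, "Sunny": 0.6}}
emit_p = {
    "Rainy": {"walk": 0.1, "shop": 0.4, "clean": 0.5},
    "Sunny": {"walk": 0.6, "shop": 0.3, "clean": 0.1},
}
assert viterbi_sketch(observations, states, start_p, trans_p, emit_p) == ["Sunny", "Rainy", "Rainy"]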
| 21 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def lowerCAmelCase_ ( lowerCamelCase ):
# A local function to see if a dot lands in the circle.
def is_in_circle(lowerCamelCase , lowerCamelCase ) -> bool:
__magic_name__ : Dict =sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
__magic_name__ : Union[str, Any] =mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(lowerCamelCase ) )
# The ratio of the area for circle to square is pi/4.
__magic_name__ : List[Any] =proportion * 4
print(F"The estimated value of pi is {pi_estimate}" )
print(F"The numpy value of pi is {pi}" )
print(F"The total error is {abs(pi - pi_estimate )}" )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = 0.0 , lowerCamelCase = 1.0 , ):
return mean(
function_to_integrate(uniform(lowerCamelCase , lowerCamelCase ) ) for _ in range(lowerCamelCase ) ) * (max_value - min_value)
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase = 0.0 , lowerCamelCase = 1.0 ):
def identity_function(lowerCamelCase ) -> float:
return x
__magic_name__ : Optional[int] =area_under_curve_estimator(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
__magic_name__ : str =(max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(F"Estimating area under y=x where x varies from {min_value} to {max_value}" )
print(F"Estimated value is {estimated_value}" )
print(F"Expected value is {expected_value}" )
print(F"Total error is {abs(estimated_value - expected_value )}" )
print("""******************""" )
def lowerCAmelCase_ ( lowerCamelCase ):
def function_to_integrate(lowerCamelCase ) -> float:
return sqrt(4.0 - x * x )
__magic_name__ : Dict =area_under_curve_estimator(
lowerCamelCase , lowerCamelCase , 0.0 , 2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(F"Estimated value is {estimated_value}" )
print(F"Expected value is {pi}" )
print(F"Total error is {abs(estimated_value - pi )}" )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
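# The same two estimators in condensed, runnable form; the function names are
# my own, and the fixed seed only makes the sketch reproducible:
import random
from math import sqrt
from statistics import mean

random.seed(0)

def estimate_pi(n: int) -> float:
    # P(a point drawn uniformly from [-1, 1]^2 lands in the unit circle) = pi / 4
    hits = sum(
        random.uniform(-1, 1) ** 2 + random.uniform(-1, 1) ** 2 <= 1 for _ in range(n)
    )
    return 4 * hits / n

def monte_carlo_integral(f, n: int, lo: float = 0.0, hi: float = 1.0) -> float:
    # E[f(U)] * (hi - lo) for U ~ Uniform(lo, hi)
    return mean(f(random.uniform(lo, hi)) for _ in range(n)) * (hi - lo)

print(estimate_pi(100_000))                                            # ~3.14
print(monte_carlo_integral(lambda x: sqrt(4 - x * x), 100_000, 0, 2))  # ~pi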
| 21 | 1 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class __A ( UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = CanineTokenizer
UpperCamelCase = False
def A__ ( self :Tuple ):
'''simple docstring'''
super().setUp()
__magic_name__ : Optional[int] =CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def A__ ( self :Optional[Any] ):
'''simple docstring'''
return CanineTokenizer.from_pretrained("""google/canine-s""" )
def A__ ( self :Optional[int] , **__snake_case :Any ):
'''simple docstring'''
__magic_name__ : Any =self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
__magic_name__ : Optional[int] =10_24
return tokenizer
@require_torch
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ : str =self.canine_tokenizer
__magic_name__ : Any =["""Life is like a box of chocolates.""", """You never know what you're gonna get."""]
# fmt: off
__magic_name__ : Optional[int] =[5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
__magic_name__ : Dict =tokenizer(__snake_case , padding=__snake_case , return_tensors="""pt""" )
self.assertIsInstance(__snake_case , __snake_case )
__magic_name__ : Optional[int] =list(batch.input_ids.numpy()[0] )
self.assertListEqual(__snake_case , __snake_case )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =self.canine_tokenizer
        __magic_name__ : Optional[Any] =["""Once there was a man.""", """He wrote a test in HuggingFace Transformers."""]
__magic_name__ : int =tokenizer(__snake_case , padding=__snake_case , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , __snake_case )
self.assertIn("""attention_mask""" , __snake_case )
self.assertIn("""token_type_ids""" , __snake_case )
@require_torch
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ : Dict =self.canine_tokenizer
__magic_name__ : List[Any] =[
"""What's the weater?""",
"""It's about 25 degrees.""",
]
__magic_name__ : Any =tokenizer(
text_target=__snake_case , max_length=32 , padding="""max_length""" , truncation=__snake_case , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Optional[Any] =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__magic_name__ : Tuple =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__ : Any =tempfile.mkdtemp()
__magic_name__ : Union[str, Any] =""" He is very happy, UNwant\u00E9d,running"""
__magic_name__ : List[str] =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
tokenizer.save_pretrained(__snake_case )
__magic_name__ : Optional[Any] =tokenizer.__class__.from_pretrained(__snake_case )
__magic_name__ : str =after_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
shutil.rmtree(__snake_case )
__magic_name__ : int =self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__ : str =tempfile.mkdtemp()
__magic_name__ : Optional[int] =""" He is very happy, UNwant\u00E9d,running"""
__magic_name__ : Optional[Any] =tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__magic_name__ : Optional[int] =chr(0xE_0_0_7 )
additional_special_tokens.append(__snake_case )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
__magic_name__ : List[Any] =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
tokenizer.save_pretrained(__snake_case )
__magic_name__ : Optional[Any] =tokenizer.__class__.from_pretrained(__snake_case )
__magic_name__ : List[Any] =after_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
self.assertIn(__snake_case , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__magic_name__ : Optional[int] =tokenizer.__class__.from_pretrained(__snake_case , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__snake_case )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Optional[int] =self.get_tokenizers(do_lower_case=__snake_case )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
__magic_name__ , __magic_name__ : List[str] =self.get_clean_sequence(__snake_case )
# a special token for Canine can be defined as follows:
__magic_name__ : Tuple =0xE_0_0_5
__magic_name__ : Tuple =chr(__snake_case )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
__magic_name__ : Optional[int] =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertEqual(len(__snake_case ) , 1 )
__magic_name__ : Any =tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__snake_case )
__magic_name__ : Union[str, Any] =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
__magic_name__ : Optional[int] =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
__magic_name__ : Union[str, Any] =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertEqual(__snake_case , input_encoded + special_token_id )
__magic_name__ : List[str] =tokenizer.decode(__snake_case , skip_special_tokens=__snake_case )
self.assertTrue(special_token not in decoded )
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : Dict =self.get_tokenizers(do_lower_case=__snake_case )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
__magic_name__ : Tuple =chr(0xE_0_0_5 )
__magic_name__ : Union[str, Any] =chr(0xE_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__snake_case )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
__magic_name__ : List[Any] =tokenizer.tokenize(__snake_case )
__magic_name__ : Union[str, Any] =tokenizer.tokenize(__snake_case )
self.assertEqual(len(__snake_case ) , 1 )
self.assertEqual(len(__snake_case ) , 1 )
self.assertEqual(token_a[0] , __snake_case )
self.assertEqual(token_a[0] , __snake_case )
@require_tokenizers
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ : Dict =self.get_tokenizers(do_lower_case=__snake_case )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# a special token for Canine can be defined as follows:
__magic_name__ : Dict =0xE_0_0_6
__magic_name__ : Tuple =chr(__snake_case )
__magic_name__ : str =AddedToken(__snake_case , lstrip=__snake_case )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__snake_case )
tokenizer.from_pretrained(__snake_case )
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ : str =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__snake_case )
with open(os.path.join(__snake_case , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
__magic_name__ : List[Any] =json.load(__snake_case )
with open(os.path.join(__snake_case , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
__magic_name__ : str =json.load(__snake_case )
# a special token for Canine can be defined as follows:
__magic_name__ : int =0xE_0_0_6
__magic_name__ : List[str] =chr(__snake_case )
__magic_name__ : Union[str, Any] =[new_token_a]
__magic_name__ : List[Any] =[new_token_a]
with open(os.path.join(__snake_case , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__snake_case , __snake_case )
with open(os.path.join(__snake_case , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__snake_case , __snake_case )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__magic_name__ : Union[str, Any] =tokenizer_class.from_pretrained(__snake_case , extra_ids=0 )
self.assertIn(__snake_case , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
__magic_name__ : str =0xE_0_0_7
__magic_name__ : Optional[int] =chr(__snake_case )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__magic_name__ : List[Any] =[AddedToken(__snake_case , lstrip=__snake_case )]
__magic_name__ : str =tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , extra_ids=0 )
self.assertIn(__snake_case , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def A__ ( self :str ):
'''simple docstring'''
__magic_name__ : List[str] =self.get_tokenizers(do_lower_case=__snake_case )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
__magic_name__ : Dict ="""hello world"""
if self.space_between_special_tokens:
__magic_name__ : Dict ="""[CLS] hello world [SEP]"""
else:
__magic_name__ : int =input
__magic_name__ : Any =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
__magic_name__ : List[Any] =tokenizer.decode(__snake_case , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__snake_case , [output, output.lower()] )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : str =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
__magic_name__ : str =[
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
__magic_name__ : Union[str, Any] ="""a"""
__magic_name__ : int =ord(__snake_case )
for attr in attributes_list:
setattr(__snake_case , attr + """_id""" , __snake_case )
self.assertEqual(getattr(__snake_case , __snake_case ) , __snake_case )
self.assertEqual(getattr(__snake_case , attr + """_id""" ) , __snake_case )
setattr(__snake_case , attr + """_id""" , __snake_case )
self.assertEqual(getattr(__snake_case , __snake_case ) , __snake_case )
self.assertEqual(getattr(__snake_case , attr + """_id""" ) , __snake_case )
setattr(__snake_case , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(__snake_case , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(__snake_case , """additional_special_tokens_ids""" ) , [] )
__magic_name__ : Optional[int] =0xE_0_0_6
__magic_name__ : Any =chr(__snake_case )
setattr(__snake_case , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(__snake_case , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(__snake_case , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def A__ ( self :int ):
'''simple docstring'''
pass
def A__ ( self :str ):
'''simple docstring'''
pass
def A__ ( self :str ):
'''simple docstring'''
pass
def A__ ( self :Tuple ):
'''simple docstring'''
pass
def A__ ( self :Tuple ):
'''simple docstring'''
pass
def A__ ( self :Any ):
'''simple docstring'''
pass
def A__ ( self :Optional[int] ):
'''simple docstring'''
pass
def A__ ( self :int ):
'''simple docstring'''
pass
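# What the expected ids near the top of this test encode, as a sketch: CANINE
# tokenizes at the Unicode code-point level, with its special tokens placed in
# the private-use area (0xE000 = [CLS] = 57344, 0xE001 = [SEP] = 57345):
def canine_style_encode(text: str, cls_id: int = 0xE000, sep_id: int = 0xE001) -> list[int]:
    return [cls_id] + [ord(ch) for ch in text] + [sep_id]

ids = canine_style_encode("Life is like a box of chocolates.")
assert ids[:4] == [57344, 76, 105, 102]  # [CLS], 'L', 'i', 'f'
assert ids[-1] == 57345                  # [SEP]; real batches are then 0-padded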
| 21 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class __A ( tf.keras.layers.Layer ):
def __init__( self :Optional[int] , __snake_case :Dict[str, int] , __snake_case :List[str] , __snake_case :int = None , __snake_case :int = None ):
'''simple docstring'''
super().__init__()
__magic_name__ : Optional[int] =pad_token_id
__magic_name__ : List[Any] =max_length
__magic_name__ : Dict =vocab
__magic_name__ : int =merges
__magic_name__ : Optional[int] =BytePairTokenizer(__snake_case , __snake_case , sequence_length=__snake_case )
@classmethod
def A__ ( cls :List[Any] , __snake_case :GPTaTokenizer , *__snake_case :int , **__snake_case :Any ):
'''simple docstring'''
__magic_name__ : List[Any] =[""" """.join(__snake_case ) for m in tokenizer.bpe_ranks.keys()]
__magic_name__ : str =tokenizer.get_vocab()
return cls(__snake_case , __snake_case , *__snake_case , **__snake_case )
@classmethod
def A__ ( cls :Dict , __snake_case :Union[str, os.PathLike] , *__snake_case :Union[str, Any] , **__snake_case :int ):
'''simple docstring'''
__magic_name__ : Dict =GPTaTokenizer.from_pretrained(__snake_case , *__snake_case , **__snake_case )
return cls.from_tokenizer(__snake_case , *__snake_case , **__snake_case )
@classmethod
def A__ ( cls :Optional[Any] , __snake_case :List[Any] ):
'''simple docstring'''
return cls(**__snake_case )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def A__ ( self :List[Any] , __snake_case :Dict , __snake_case :int = None ):
'''simple docstring'''
__magic_name__ : Optional[Any] =self.tf_tokenizer(__snake_case )
__magic_name__ : Tuple =tf.ones_like(__snake_case )
if self.pad_token_id is not None:
# pad the tokens up to max length
__magic_name__ : Tuple =max_length if max_length is not None else self.max_length
if max_length is not None:
__magic_name__ , __magic_name__ : Tuple =pad_model_inputs(
__snake_case , max_seq_length=__snake_case , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 21 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase_ : Optional[Any] = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
    import sys
    UpperCAmelCase_ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
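# The block above registers a _LazyModule so the heavy torch/tf submodules are
# only imported on first attribute access. A minimal sketch of the same idea
# with plain PEP 562 module-level __getattr__ (this would live in a package's
# __init__.py; the structure dict below is illustrative):
import importlib

_sketch_import_structure = {"tokenization_xlm": ["XLMTokenizer"]}

def __getattr__(name):
    for submodule, exported in _sketch_import_structure.items():
        if name in exported:
            return getattr(importlib.import_module("." + submodule, __name__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")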
| 21 |
import math
import tensorflow as tf
from packaging import version
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : str =tf.convert_to_tensor(lowerCamelCase )
__magic_name__ : List[str] =0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : str =tf.convert_to_tensor(lowerCamelCase )
__magic_name__ : Optional[Any] =tf.cast(math.pi , x.dtype )
__magic_name__ : int =tf.cast(0.0_4_4_7_1_5 , x.dtype )
__magic_name__ : Tuple =0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(lowerCamelCase , 3 )) ))
return x * cdf
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Any =tf.convert_to_tensor(lowerCamelCase )
return x * tf.tanh(tf.math.softplus(lowerCamelCase ) )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Optional[Any] =tf.convert_to_tensor(lowerCamelCase )
__magic_name__ : Union[str, Any] =tf.cast(0.0_4_4_7_1_5 , x.dtype )
__magic_name__ : Tuple =tf.cast(0.7_9_7_8_8_4_5_6_0_8 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : List[str] =tf.convert_to_tensor(lowerCamelCase )
__magic_name__ : Dict =tf.cast(1.7_0_2 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def lowerCAmelCase_ ( lowerCamelCase ):
return tf.clip_by_value(_gelu(lowerCamelCase ) , -10 , 10 )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=-1 ):
__magic_name__ , __magic_name__ : List[Any] =tf.split(lowerCamelCase , 2 , axis=lowerCamelCase )
return a * tf.math.sigmoid(lowerCamelCase )
if version.parse(tf.version.VERSION) >= version.parse("2.4"):
def lowerCAmelCase_ ( lowerCamelCase ):
return tf.keras.activations.gelu(lowerCamelCase , approximate=lowerCamelCase )
UpperCAmelCase_ : List[str] = tf.keras.activations.gelu
UpperCAmelCase_ : Dict = approximate_gelu_wrap
else:
UpperCAmelCase_ : Dict = _gelu
UpperCAmelCase_ : str = _gelu_new
UpperCAmelCase_ : Any = {
"gelu": gelu,
"gelu_10": gelu_aa,
"gelu_fast": gelu_fast,
"gelu_new": gelu_new,
"glu": glu,
"mish": mish,
"quick_gelu": quick_gelu,
"relu": tf.keras.activations.relu,
"sigmoid": tf.keras.activations.sigmoid,
"silu": tf.keras.activations.swish,
"swish": tf.keras.activations.swish,
"tanh": tf.keras.activations.tanh,
}
def lowerCAmelCase_ ( lowerCamelCase ):
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}" )
| 21 | 1 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
def lowerCAmelCase_ ( lowerCamelCase=None , lowerCamelCase=None ):
return field(default_factory=lambda: default , metadata=lowerCamelCase )
@dataclass
class __A :
UpperCamelCase = list_field(
default=[] , metadata={
"""help""": (
"""Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"""
""" of all available models"""
)
} , )
UpperCamelCase = list_field(
default=[8] , metadata={"""help""": """List of batch sizes for which memory and time performance will be evaluated"""} )
UpperCamelCase = list_field(
default=[8, 32, 128, 512] , metadata={"""help""": """List of sequence lengths for which memory and time performance will be evaluated"""} , )
UpperCamelCase = field(
default=UpperCamelCase__ , metadata={"""help""": """Whether to benchmark inference of model. Inference can be disabled via --no-inference."""} , )
UpperCamelCase = field(
default=UpperCamelCase__ , metadata={"""help""": """Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."""} , )
UpperCamelCase = field(
default=UpperCamelCase__ , metadata={"""help""": """Whether to run on available tpu devices. TPU can be disabled via --no-tpu."""} )
UpperCamelCase = field(default=UpperCamelCase__ , metadata={"""help""": """Use FP16 to accelerate inference."""} )
UpperCamelCase = field(default=UpperCamelCase__ , metadata={"""help""": """Benchmark training of model"""} )
UpperCamelCase = field(default=UpperCamelCase__ , metadata={"""help""": """Verbose memory tracing"""} )
UpperCamelCase = field(
default=UpperCamelCase__ , metadata={"""help""": """Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."""} , )
UpperCamelCase = field(
default=UpperCamelCase__ , metadata={
"""help""": """Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"""
} , )
UpperCamelCase = field(default=UpperCamelCase__ , metadata={"""help""": """Trace memory line by line"""} )
UpperCamelCase = field(default=UpperCamelCase__ , metadata={"""help""": """Save result to a CSV file"""} )
UpperCamelCase = field(default=UpperCamelCase__ , metadata={"""help""": """Save all print statements in a log file"""} )
UpperCamelCase = field(default=UpperCamelCase__ , metadata={"""help""": """Whether to print environment information"""} )
UpperCamelCase = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"""
""" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"""
""" for debugging / testing and on TPU."""
)
} , )
UpperCamelCase = field(
default=F"""inference_time_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving time results to csv."""} , )
UpperCamelCase = field(
default=F"""inference_memory_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving memory results to csv."""} , )
UpperCamelCase = field(
default=F"""train_time_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving time results to csv for training."""} , )
UpperCamelCase = field(
default=F"""train_memory_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving memory results to csv for training."""} , )
UpperCamelCase = field(
default=F"""env_info_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving environment information."""} , )
UpperCamelCase = field(
default=F"""log_{round(time() )}.csv""" , metadata={"""help""": """Log filename used if print statements are saved in log."""} , )
UpperCamelCase = field(default=3 , metadata={"""help""": """Times an experiment will be run."""} )
UpperCamelCase = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"""
""" model weights."""
)
} , )
def A__ ( self :List[Any] ):
'''simple docstring'''
warnings.warn(
f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
""" are deprecated in general and it is advised to use external Benchmarking libraries """
""" to benchmark Transformer models.""" , __snake_case , )
def A__ ( self :Dict ):
'''simple docstring'''
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
if len(self.models ) <= 0:
raise ValueError(
"""Please make sure you provide at least one model name / model identifier, *e.g.* `--models"""
""" bert-base-cased` or `args.models = ['bert-base-cased'].""" )
return self.models
@property
def A__ ( self :Optional[Any] ):
'''simple docstring'''
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("""Multiprocessing is currently not possible on TPU.""" )
return False
else:
return True
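# Why the `list_field` helper at the top of this file exists, as a sketch:
# dataclasses reject mutable defaults outright, so list-valued arguments must
# go through default_factory (the field name below is illustrative):
from dataclasses import dataclass, field

@dataclass
class _Sketch:
    batch_sizes: list = field(default_factory=lambda: [8])  # ok
    # batch_sizes: list = [8]  # would raise ValueError at class-creation time

assert _Sketch().batch_sizes == [8]
assert _Sketch().batch_sizes is not _Sketch().batch_sizes  # fresh list each time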
| 21 |
from collections.abc import Sequence
def lowerCAmelCase_ ( lowerCamelCase = None ):
if nums is None or not nums:
raise ValueError("""Input sequence should not be empty""" )
__magic_name__ : str =nums[0]
for i in range(1 , len(lowerCamelCase ) ):
__magic_name__ : Any =nums[i]
__magic_name__ : Dict =max(lowerCamelCase , ans + num , lowerCamelCase )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
UpperCAmelCase_ : List[str] = int(input("Enter number of elements : ").strip())
UpperCAmelCase_ : Tuple = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
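# The loop above maximizes over *subsequences* (elements need not be
# contiguous): at each step keep the best of "best so far", "best so far plus
# this element", and "this element alone". A named, runnable sketch:
def max_subsequence_sum_sketch(nums: list[int]) -> int:
    best = nums[0]
    for num in nums[1:]:
        best = max(best, best + num, num)
    return best

# Closed form: the sum of the positive elements, or the maximum element if
# every element is negative.
assert max_subsequence_sum_sketch([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 12
assert max_subsequence_sum_sketch([-5, -2, -9]) == -2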
| 21 | 1 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
__magic_name__ : List[Any] =XCLIPTextConfig()
# derive patch size from model name
__magic_name__ : int =model_name.find("""patch""" )
__magic_name__ : str =int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
__magic_name__ : Dict =XCLIPVisionConfig(patch_size=lowerCamelCase , num_frames=lowerCamelCase )
if "large" in model_name:
__magic_name__ : int =768
__magic_name__ : Tuple =3072
__magic_name__ : str =12
__magic_name__ : Optional[Any] =1024
__magic_name__ : List[str] =4096
__magic_name__ : Union[str, Any] =16
__magic_name__ : Union[str, Any] =24
__magic_name__ : Tuple =768
__magic_name__ : Union[str, Any] =3072
if model_name == "xclip-large-patch14-16-frames":
__magic_name__ : Dict =336
__magic_name__ : Any =XCLIPConfig.from_text_vision_configs(lowerCamelCase , lowerCamelCase )
if "large" in model_name:
__magic_name__ : int =768
return config
def lowerCAmelCase_ ( lowerCamelCase ):
# text encoder
if name == "token_embedding.weight":
__magic_name__ : int =name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" )
if name == "positional_embedding":
__magic_name__ : Union[str, Any] =name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "ln_1" in name:
__magic_name__ : Union[str, Any] =name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
__magic_name__ : int =name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
__magic_name__ : Optional[Any] =name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
__magic_name__ : Any =name.replace("""c_proj""" , """fc2""" )
if name.startswith("""transformer.resblocks""" ):
__magic_name__ : List[str] =name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" )
if "attn.out_proj" in name and "message" not in name:
__magic_name__ : Optional[Any] =name.replace("""attn.out_proj""" , """self_attn.out_proj""" )
if "ln_final" in name:
__magic_name__ : str =name.replace("""ln_final""" , """text_model.final_layer_norm""" )
# visual encoder
if name == "visual.class_embedding":
__magic_name__ : Optional[int] =name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" )
if name == "visual.positional_embedding":
__magic_name__ : Dict =name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" )
if name.startswith("""visual.transformer.resblocks""" ):
__magic_name__ : Optional[int] =name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" )
if "visual.conv1" in name:
__magic_name__ : Any =name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" )
if "visual.ln_pre" in name:
__magic_name__ : Optional[Any] =name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" )
if "visual.ln_post" in name:
__magic_name__ : Optional[int] =name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" )
if "visual.proj" in name:
__magic_name__ : List[Any] =name.replace("""visual.proj""" , """visual_projection.weight""" )
if "text_projection" in name:
__magic_name__ : Optional[int] =name.replace("""text_projection""" , """text_projection.weight""" )
# things on top
if "prompts_visual_proj" in name:
__magic_name__ : Union[str, Any] =name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" )
if "prompts_visual_ln" in name:
__magic_name__ : List[Any] =name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" )
# mit
if name == "mit.positional_embedding":
__magic_name__ : Union[str, Any] =name.replace("""positional""" , """position""" )
if name.startswith("""mit.resblocks""" ):
__magic_name__ : Union[str, Any] =name.replace("""mit.resblocks""" , """mit.encoder.layers""" )
# prompts generator
if name.startswith("""prompts_generator.norm""" ):
__magic_name__ : int =name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" )
return name
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
for key in orig_state_dict.copy().keys():
__magic_name__ : Dict =orig_state_dict.pop(lowerCamelCase )
if "attn.in_proj" in key:
__magic_name__ : Any =key.split(""".""" )
if key.startswith("""visual""" ):
__magic_name__ : Tuple =key_split[3]
__magic_name__ : Dict =config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__magic_name__ : Optional[Any] =val[
:dim, :
]
__magic_name__ : Optional[Any] =val[
dim : dim * 2, :
]
__magic_name__ : Any =val[
-dim:, :
]
else:
__magic_name__ : Union[str, Any] =val[
:dim
]
__magic_name__ : Union[str, Any] =val[
dim : dim * 2
]
__magic_name__ : int =val[
-dim:
]
else:
if "weight" in key:
__magic_name__ : str =val[
:dim, :
]
__magic_name__ : Any =val[
dim : dim * 2, :
]
__magic_name__ : Any =val[
-dim:, :
]
else:
__magic_name__ : Any =val[:dim]
__magic_name__ : List[str] =val[
dim : dim * 2
]
__magic_name__ : Any =val[-dim:]
elif key.startswith("""mit""" ):
__magic_name__ : Dict =key_split[2]
__magic_name__ : str =config.vision_config.mit_hidden_size
if "weight" in key:
__magic_name__ : Optional[int] =val[:dim, :]
__magic_name__ : Union[str, Any] =val[dim : dim * 2, :]
__magic_name__ : int =val[-dim:, :]
else:
__magic_name__ : List[Any] =val[:dim]
__magic_name__ : Tuple =val[dim : dim * 2]
__magic_name__ : Any =val[-dim:]
else:
__magic_name__ : Union[str, Any] =key_split[2]
__magic_name__ : int =config.text_config.hidden_size
if "weight" in key:
__magic_name__ : List[str] =val[:dim, :]
__magic_name__ : List[str] =val[
dim : dim * 2, :
]
__magic_name__ : List[Any] =val[-dim:, :]
else:
__magic_name__ : Optional[Any] =val[:dim]
__magic_name__ : str =val[
dim : dim * 2
]
__magic_name__ : Optional[int] =val[-dim:]
else:
__magic_name__ : Optional[int] =rename_key(lowerCamelCase )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__magic_name__ : Any =val.T
__magic_name__ : Optional[Any] =val
return orig_state_dict
def lowerCAmelCase_ ( lowerCamelCase ):
if num_frames == 8:
__magic_name__ : Union[str, Any] ="""eating_spaghetti_8_frames.npy"""
elif num_frames == 16:
__magic_name__ : Union[str, Any] ="""eating_spaghetti.npy"""
elif num_frames == 32:
__magic_name__ : Union[str, Any] ="""eating_spaghetti_32_frames.npy"""
__magic_name__ : str =hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename=lowerCamelCase , repo_type="""dataset""" , )
__magic_name__ : Union[str, Any] =np.load(lowerCamelCase )
return list(lowerCamelCase )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=None , lowerCamelCase=False ):
__magic_name__ : Tuple ={
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
__magic_name__ : List[str] =model_to_url[model_name]
__magic_name__ : List[Any] =8
if "16-frames" in model_name:
__magic_name__ : Any =16
elif "shot" in model_name:
__magic_name__ : Optional[int] =32
__magic_name__ : Union[str, Any] =get_xclip_config(lowerCamelCase , lowerCamelCase )
__magic_name__ : Any =XCLIPModel(lowerCamelCase )
model.eval()
if "drive" in checkpoint_url:
__magic_name__ : Optional[int] ="""pytorch_model.bin"""
gdown.cached_download(lowerCamelCase , lowerCamelCase , quiet=lowerCamelCase )
__magic_name__ : Optional[int] =torch.load(lowerCamelCase , map_location="""cpu""" )["""model"""]
else:
__magic_name__ : Optional[Any] =torch.hub.load_state_dict_from_url(lowerCamelCase )["""model"""]
__magic_name__ : Optional[Any] =convert_state_dict(lowerCamelCase , lowerCamelCase )
__magic_name__ : Any =XCLIPModel(lowerCamelCase )
__magic_name__ , __magic_name__ : Dict =model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
__magic_name__ : str =336 if model_name == """xclip-large-patch14-16-frames""" else 224
__magic_name__ : Union[str, Any] =VideoMAEImageProcessor(size=lowerCamelCase )
__magic_name__ : Any =CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
__magic_name__ : List[str] =CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
__magic_name__ : Tuple =XCLIPProcessor(image_processor=lowerCamelCase , tokenizer=lowerCamelCase )
__magic_name__ : str =prepare_video(lowerCamelCase )
__magic_name__ : List[Any] =processor(
text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=lowerCamelCase , return_tensors="""pt""" , padding=lowerCamelCase )
print("""Shape of pixel values:""" , inputs.pixel_values.shape )
with torch.no_grad():
__magic_name__ : Any =model(**lowerCamelCase )
# Verify outputs
__magic_name__ : Dict =outputs.logits_per_video
__magic_name__ : Optional[Any] =logits_per_video.softmax(dim=1 )
print("""Probs:""" , lowerCamelCase )
# kinetics-400
if model_name == "xclip-base-patch32":
__magic_name__ : Union[str, Any] =torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]] )
elif model_name == "xclip-base-patch32-16-frames":
__magic_name__ : Tuple =torch.tensor([[7.0_999E-04, 9.9_883E-01, 4.5_580E-04]] )
elif model_name == "xclip-base-patch16":
__magic_name__ : str =torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]] )
elif model_name == "xclip-base-patch16-16-frames":
__magic_name__ : List[Any] =torch.tensor([[7.6_937E-04, 9.9_728E-01, 1.9_473E-03]] )
elif model_name == "xclip-large-patch14":
__magic_name__ : Optional[int] =torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]] )
elif model_name == "xclip-large-patch14-16-frames":
__magic_name__ : Union[str, Any] =torch.tensor([[3.3_877E-04, 9.9_937E-01, 2.8_888E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__magic_name__ : Any =torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__magic_name__ : str =torch.tensor([[3.8_554E-04, 9.9_929E-01, 3.2_754E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__magic_name__ : Dict =torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__magic_name__ : str =torch.tensor([[7.1_890E-06, 9.9_994E-01, 5.6_559E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__magic_name__ : Tuple =torch.tensor([[1.0_320E-05, 9.9_993E-01, 6.2_435E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__magic_name__ : List[str] =torch.tensor([[4.1_377E-06, 9.9_990E-01, 9.8_386E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__magic_name__ : List[str] =torch.tensor([[4.1_347E-05, 9.9_962E-01, 3.3_411E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__magic_name__ : Optional[Any] =torch.tensor([[8.5_857E-05, 9.9_928E-01, 6.3_291E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__magic_name__ : int =torch.tensor([[8.5_857E-05, 9.9_928E-01, 6.3_291E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__magic_name__ : Optional[Any] =torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__magic_name__ : Any =torch.tensor([[9.8_219E-04, 9.9_593E-01, 3.0_863E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__magic_name__ : int =torch.tensor([[3.5_082E-04, 9.9_785E-01, 1.7_966E-03]] )
else:
raise ValueError(F"Model name {model_name} not supported" )
assert torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCamelCase )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
model.push_to_hub(lowerCamelCase , organization="""nielsr""" )
processor.push_to_hub(lowerCamelCase , organization="""nielsr""" )
slow_tokenizer.push_to_hub(lowerCamelCase , organization="""nielsr""" )
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
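# What the in_proj handling inside convert_state_dict does, as a sketch:
# CLIP-style attention stores the query/key/value projections fused into one
# (3 * dim, dim) matrix, and the HF model expects three separate projections:
import torch

dim = 4
fused_weight = torch.randn(3 * dim, dim)  # stand-in for attn.in_proj_weight
q_w, k_w, v_w = fused_weight[:dim], fused_weight[dim : 2 * dim], fused_weight[-dim:]
assert torch.equal(torch.cat([q_w, k_w, v_w]), fused_weight)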
| 21 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __A :
UpperCamelCase = 42
UpperCamelCase = None
# Automatically constructed
UpperCamelCase = "dict"
UpperCamelCase = None
UpperCamelCase = field(default="""Translation""" , init=UpperCamelCase__ , repr=UpperCamelCase__ )
def __call__( self :Union[str, Any] ):
'''simple docstring'''
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def A__ ( self :List[Any] ):
'''simple docstring'''
from .features import Value
return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class __A :
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
# Automatically constructed
UpperCamelCase = "dict"
UpperCamelCase = None
UpperCamelCase = field(default="""TranslationVariableLanguages""" , init=UpperCamelCase__ , repr=UpperCamelCase__ )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[Any] =sorted(set(self.languages ) ) if self.languages else None
__magic_name__ : Optional[int] =len(self.languages ) if self.languages else None
def __call__( self :List[str] ):
'''simple docstring'''
return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
def A__ ( self :str , __snake_case :str ):
'''simple docstring'''
__magic_name__ : Optional[int] =set(self.languages )
if self.languages and set(__snake_case ) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(__snake_case ) - lang_set ) )}) are not in valid set ({', '.join(__snake_case )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__magic_name__ : Any =[]
for lang, text in translation_dict.items():
if isinstance(__snake_case , __snake_case ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__magic_name__ , __magic_name__ : List[str] =zip(*sorted(__snake_case ) )
return {"language": languages, "translation": translations}
def A__ ( self :List[Any] ):
'''simple docstring'''
from .features import Sequence, Value
return {
"language": Sequence(Value("""string""" ) ),
"translation": Sequence(Value("""string""" ) ),
}
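# A pure-Python sketch of the flattening step in the last encode method above:
# a {lang: str | list[str]} mapping becomes two aligned, (language, text)-sorted
# lists, matching the struct-of-lists Arrow layout:
def flatten_translations(translation_dict: dict) -> dict:
    pairs = []
    for lang, text in translation_dict.items():
        texts = [text] if isinstance(text, str) else text
        pairs.extend((lang, t) for t in texts)
    languages, translations = zip(*sorted(pairs))
    return {"language": list(languages), "translation": list(translations)}

out = flatten_translations({"en": "the cat", "fr": ["le chat", "la chatte"]})
assert out == {
    "language": ["en", "fr", "fr"],
    "translation": ["the cat", "la chatte", "le chat"],
}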
| 21 | 1 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase_ : List[Any] = {"vocab_file": "spiece.model"}
UpperCAmelCase_ : List[Any] = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
UpperCAmelCase_ : Optional[Any] = {
"AI-Sweden/gpt-sw3-126m": 2048,
"AI-Sweden/gpt-sw3-350m": 2048,
"AI-Sweden/gpt-sw3-1.6b": 2048,
"AI-Sweden/gpt-sw3-6.7b": 2048,
"AI-Sweden/gpt-sw3-20b": 2048,
}
class __A ( UpperCamelCase__ ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self :List[Any] , __snake_case :Union[str, Any] , __snake_case :str=False , __snake_case :List[str]=False , __snake_case :str=False , __snake_case :str=None , __snake_case :List[str]=None , __snake_case :Tuple=None , __snake_case :Dict=None , __snake_case :Optional[Dict[str, Any]] = None , **__snake_case :Optional[Any] , ):
'''simple docstring'''
__magic_name__ : Union[str, Any] ={} if sp_model_kwargs is None else sp_model_kwargs
__magic_name__ : List[Any] =kwargs.get("""name_or_path""" )
if name_or_path is None:
logger.warning(
"""name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
""" you are testing the model, this can safely be ignored""" )
__magic_name__ : Any ="""None"""
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
__magic_name__ : Optional[int] ="""<|endoftext|>""" if eos_token is None else eos_token
__magic_name__ : List[Any] ="""<unk>""" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
__magic_name__ : int =unk_token if pad_token is None else pad_token
__magic_name__ : int =eos_token if bos_token is None else bos_token
else:
__magic_name__ : Optional[Any] ="""<pad>""" if pad_token is None else pad_token
__magic_name__ : Optional[int] ="""<s>""" if bos_token is None else bos_token
super().__init__(
do_lower_case=__snake_case , remove_space=__snake_case , keep_accents=__snake_case , bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
__magic_name__ : Tuple =do_lower_case
__magic_name__ : List[str] =remove_space
__magic_name__ : Optional[Any] =keep_accents
__magic_name__ : int =vocab_file
__magic_name__ : str =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__snake_case )
# Used for whitespace normalization in input texts
        # fmt: off
__magic_name__ : List[Any] ={""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
__magic_name__ : str =re.compile(
f"[{''.join(map(__snake_case , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(1_27 , 1_60 ) ) + [1_60, 1_73, 82_03] ) )}]" )
def __getstate__( self :Dict ):
'''simple docstring'''
__magic_name__ : Optional[int] =self.__dict__.copy()
__magic_name__ : List[str] =None
return state
def __setstate__( self :List[Any] , __snake_case :List[str] ):
'''simple docstring'''
__magic_name__ : List[Any] =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__magic_name__ : Dict ={}
__magic_name__ : Optional[int] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def A__ ( self :Any ):
'''simple docstring'''
return len(self.sp_model )
def A__ ( self :Any , __snake_case :str ):
'''simple docstring'''
__magic_name__ : Optional[int] =self.non_printing_characters_re.sub("""""" , __snake_case )
# Normalize whitespaces
__magic_name__ : Union[str, Any] ="""""".join([char if char not in self.whitespaces else """ """ for char in text] )
# NFC Unicode normalization
__magic_name__ : List[str] =unicodedata.normalize("""NFC""" , __snake_case )
return text
def A__ ( self :Dict , __snake_case :str , **__snake_case :Optional[int] ):
'''simple docstring'''
__magic_name__ : str =self.preprocess_text(__snake_case )
return self.sp_model.encode(__snake_case , out_type=__snake_case )
def A__ ( self :Union[str, Any] , __snake_case :str ):
'''simple docstring'''
return self.sp_model.PieceToId(__snake_case )
def A__ ( self :Dict , __snake_case :int ):
'''simple docstring'''
return self.sp_model.IdToPiece(__snake_case )
@staticmethod
def A__ ( __snake_case :str ):
'''simple docstring'''
return out_string
def A__ ( self :Optional[int] , __snake_case :List[str] ):
'''simple docstring'''
__magic_name__ : Optional[int] =[]
__magic_name__ : List[Any] =""""""
__magic_name__ : Union[str, Any] =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__snake_case ) + token
__magic_name__ : int =True
__magic_name__ : str =[]
else:
current_sub_tokens.append(__snake_case )
__magic_name__ : List[Any] =False
out_string += self.sp_model.decode(__snake_case )
return out_string
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : List[Any] ={self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A__ ( self :List[str] , __snake_case :str , __snake_case :Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(__snake_case ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
__magic_name__ : Tuple =os.path.join(
__snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(__snake_case , """wb""" ) as fi:
__magic_name__ : Dict =self.sp_model.serialized_model_proto()
fi.write(__snake_case )
return (out_vocab_file,)
def A__ ( self :Dict , __snake_case :Union[str, List[str]] , __snake_case :Union[str, bool] = False ):
'''simple docstring'''
if isinstance(__snake_case , __snake_case ):
__magic_name__ : List[str] =self.preprocess_text(__snake_case )
__magic_name__ : Optional[int] =self.sp_model.encode(__snake_case )
else:
__magic_name__ : str =[self.preprocess_text(__snake_case ) for t in text]
__magic_name__ : Optional[Any] =self.sp_model.encode(__snake_case )
if return_tensors is True or return_tensors == "pt":
__magic_name__ : Any =torch.tensor(__snake_case )
return token_ids
def A__ ( self :Tuple , __snake_case :Union[int, List[int]] ):
'''simple docstring'''
return self.sp_model.decode(__snake_case )
def A__ ( self :Any , __snake_case :"Conversation" ):
'''simple docstring'''
__magic_name__ : List[str] =[f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
__magic_name__ : str =(
f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(__snake_case ) + f"{self.bos_token}Bot:"
)
return self.encode(text=__snake_case )
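# A minimal sketch of the prompt layout the conversation method above produces,
# assuming "<s>"/"</s>" as bos/eos token values (illustrative only; the real
# special tokens come from the tokenizer configuration).
def _sketch_conversation_prompt(turns, bos="<s>", eos="</s>"):
    texts = [f"User: {t}" if is_user else f"Bot: {t}" for is_user, t in turns]
    return f"{eos}{bos}" + f"{bos}".join(texts) + f"{bos}Bot:"
# _sketch_conversation_prompt([(True, "Hi"), (False, "Hello")])
# -> '</s><s>User: Hi<s>Bot: Hello<s>Bot:'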
| 21 |
from sklearn.metrics import matthews_corrcoef
import datasets
UpperCAmelCase_ : Dict = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
UpperCAmelCase_ : Any = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
UpperCAmelCase_ : Dict = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def A__ ( self :List[str] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
def A__ ( self :Tuple , __snake_case :str , __snake_case :Tuple , __snake_case :List[str]=None ):
'''simple docstring'''
return {
"matthews_correlation": float(matthews_corrcoef(__snake_case , __snake_case , sample_weight=__snake_case ) ),
}
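# A from-scratch cross-check of the binary-case formula behind the metric above:
# MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)).
# Minimal sketch for 0/1 labels only; the metric itself delegates the general
# multiclass case to sklearn's matthews_corrcoef.
import math
def _sketch_binary_mcc(references, predictions):
    tp = sum(r == 1 and p == 1 for r, p in zip(references, predictions))
    tn = sum(r == 0 and p == 0 for r, p in zip(references, predictions))
    fp = sum(r == 0 and p == 1 for r, p in zip(references, predictions))
    fn = sum(r == 1 and p == 0 for r, p in zip(references, predictions))
    denom = math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return (tp * tn - fp * fn) / denom if denom else 0.0
# _sketch_binary_mcc([1, 0, 1, 1], [1, 0, 0, 1]) -> ~0.577, matching sklearn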
| 21 | 1 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
UpperCAmelCase_ : Optional[Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class __A ( datasets.BuilderConfig ):
UpperCamelCase = 10000
UpperCamelCase = None
UpperCamelCase = None
class __A ( datasets.ArrowBasedBuilder ):
UpperCamelCase = ParquetConfig
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def A__ ( self :List[Any] , __snake_case :Optional[Any] ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
__magic_name__ : Any =dl_manager.download_and_extract(self.config.data_files )
if isinstance(__snake_case , (str, list, tuple) ):
__magic_name__ : Optional[Any] =data_files
if isinstance(__snake_case , __snake_case ):
__magic_name__ : Optional[int] =[files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__magic_name__ : List[str] =[dl_manager.iter_files(__snake_case ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
__magic_name__ : Optional[int] =[]
for split_name, files in data_files.items():
if isinstance(__snake_case , __snake_case ):
__magic_name__ : Optional[Any] =[files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__magic_name__ : Union[str, Any] =[dl_manager.iter_files(__snake_case ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(__snake_case ):
with open(__snake_case , """rb""" ) as f:
__magic_name__ : List[Any] =datasets.Features.from_arrow_schema(pq.read_schema(__snake_case ) )
break
splits.append(datasets.SplitGenerator(name=__snake_case , gen_kwargs={"""files""": files} ) )
return splits
def A__ ( self :List[Any] , __snake_case :pa.Table ):
'''simple docstring'''
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__magic_name__ : Union[str, Any] =table_cast(__snake_case , self.info.features.arrow_schema )
return pa_table
def A__ ( self :Tuple , __snake_case :str ):
'''simple docstring'''
__magic_name__ : Optional[Any] =self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'" )
for file_idx, file in enumerate(itertools.chain.from_iterable(__snake_case ) ):
with open(__snake_case , """rb""" ) as f:
__magic_name__ : Optional[Any] =pq.ParquetFile(__snake_case )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
__magic_name__ : Dict =pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"{file_idx}_{batch_idx}", self._cast_table(__snake_case )
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(__snake_case )}: {e}" )
raise
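# A self-contained sketch of the batched read pattern used in the generator above:
# write a tiny parquet file, then stream it back with pq.ParquetFile.iter_batches
# (the same pyarrow calls the builder uses). Path and batch size are illustrative.
def _sketch_parquet_batches(path="/tmp/_sketch.parquet"):
    table = pa.table({"a": list(range(10)), "b": [str(i) for i in range(10)]})
    pq.write_table(table, path)
    parquet_file = pq.ParquetFile(path)
    for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=4)):
        yield batch_idx, pa.Table.from_batches([record_batch])
# list(_sketch_parquet_batches()) -> three (index, table) pairs of 4 + 4 + 2 rows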
| 21 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=False ):
__magic_name__ : Optional[int] =OmegaConf.load(lowerCamelCase )
if display:
print(yaml.dump(OmegaConf.to_container(lowerCamelCase ) ) )
return config
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None ):
if conf_path is None:
__magic_name__ : List[str] ="""./model_checkpoints/vqgan_only.yaml"""
__magic_name__ : Dict =load_config(lowerCamelCase , display=lowerCamelCase )
__magic_name__ : Tuple =VQModel(**config.model.params )
if ckpt_path is None:
__magic_name__ : Optional[Any] ="""./model_checkpoints/vqgan_only.pt"""
__magic_name__ : Tuple =torch.load(lowerCamelCase , map_location=lowerCamelCase )
if ".ckpt" in ckpt_path:
__magic_name__ : Any =sd["""state_dict"""]
model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
model.to(lowerCamelCase )
del sd
return model
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
__magic_name__ , __magic_name__ , __magic_name__ : Optional[Any] =model.encode(lowerCamelCase )
print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
__magic_name__ : List[Any] =model.decode(lowerCamelCase )
return xrec
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=False ):
__magic_name__ , __magic_name__ : Optional[int] =string.rsplit(""".""" , 1 )
if reload:
__magic_name__ : Optional[int] =importlib.import_module(lowerCamelCase )
importlib.reload(lowerCamelCase )
return getattr(importlib.import_module(lowerCamelCase , package=lowerCamelCase ) , cls )
def lowerCAmelCase_ ( lowerCamelCase ):
if "target" not in config:
raise KeyError("""Expected key `target` to instantiate.""" )
return get_obj_from_str(config["""target"""] )(**config.get("""params""" , {} ) )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=True , lowerCamelCase=True ):
__magic_name__ : str =instantiate_from_config(lowerCamelCase )
if sd is not None:
model.load_state_dict(lowerCamelCase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
# load the specified checkpoint
if ckpt:
__magic_name__ : str =torch.load(lowerCamelCase , map_location="""cpu""" )
__magic_name__ : Any =pl_sd["""global_step"""]
print(F"loaded model from global step {global_step}." )
else:
__magic_name__ : List[Any] ={"""state_dict""": None}
__magic_name__ : Optional[Any] =None
__magic_name__ : Tuple =load_model_from_config(config.model , pl_sd["""state_dict"""] , gpu=lowerCamelCase , eval_mode=lowerCamelCase )["""model"""]
return model, global_step
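# A minimal stand-alone sketch of the "target"/"params" instantiation pattern
# implemented above, using only the standard library so it runs as-is. The
# OrderedDict target is purely illustrative.
def _sketch_instantiate(config):
    module_name, cls_name = config["target"].rsplit(".", 1)
    cls = getattr(importlib.import_module(module_name), cls_name)
    return cls(**config.get("params", {}))
# _sketch_instantiate({"target": "collections.OrderedDict", "params": {}}) -> OrderedDict()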
| 21 | 1 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
UpperCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self :Optional[int] , __snake_case :int="</s>" , __snake_case :List[Any]="<unk>" , __snake_case :Optional[int]="<pad>" , __snake_case :Any=1_25 , __snake_case :Optional[Any]=None , **__snake_case :Optional[int] , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
__magic_name__ : Tuple =[f"<extra_id_{i}>" for i in range(__snake_case )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
__magic_name__ : List[Any] =len(set(filter(lambda __snake_case : bool("""extra_id""" in str(__snake_case ) ) , __snake_case ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
""" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"""
""" extra_ids tokens""" )
__magic_name__ : Tuple =AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else pad_token
__magic_name__ : List[str] =AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else eos_token
__magic_name__ : int =AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else unk_token
super().__init__(
eos_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , extra_ids=__snake_case , additional_special_tokens=__snake_case , **__snake_case , )
__magic_name__ : Union[str, Any] =extra_ids
__magic_name__ : Tuple =2**8 # utf is 8 bits
# define special tokens dict
__magic_name__ : Dict[int, str] ={
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
__magic_name__ : Optional[int] =len(self.special_tokens_encoder )
__magic_name__ : Any =len(__snake_case )
for i, token in enumerate(__snake_case ):
__magic_name__ : Union[str, Any] =self.vocab_size + i - n
__magic_name__ : Dict[str, int] ={v: k for k, v in self.special_tokens_encoder.items()}
@property
def A__ ( self :Optional[Any] ):
'''simple docstring'''
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def A__ ( self :Tuple , __snake_case :List[int] , __snake_case :Optional[List[int]] = None , __snake_case :bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(__snake_case )) + [1]
return ([0] * len(__snake_case )) + [1] + ([0] * len(__snake_case )) + [1]
def A__ ( self :Union[str, Any] , __snake_case :List[int] ):
'''simple docstring'''
if len(__snake_case ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
""" eos tokens being added.""" )
return token_ids
else:
return token_ids + [self.eos_token_id]
def A__ ( self :Any , __snake_case :List[int] , __snake_case :Optional[List[int]] = None ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =[self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def A__ ( self :Union[str, Any] , __snake_case :List[int] , __snake_case :Optional[List[int]] = None ):
'''simple docstring'''
__magic_name__ : Optional[Any] =self._add_eos_if_not_present(__snake_case )
if token_ids_a is None:
return token_ids_a
else:
__magic_name__ : List[str] =self._add_eos_if_not_present(__snake_case )
return token_ids_a + token_ids_a
def A__ ( self :Tuple , __snake_case :str ):
'''simple docstring'''
__magic_name__ : Dict =[chr(__snake_case ) for i in text.encode("""utf-8""" )]
return tokens
def A__ ( self :int , __snake_case :List[str] ):
'''simple docstring'''
if token in self.special_tokens_encoder:
__magic_name__ : Dict =self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
__magic_name__ : int =self.added_tokens_encoder[token]
elif len(__snake_case ) != 1:
__magic_name__ : Optional[Any] =self.unk_token_id
else:
__magic_name__ : Any =ord(__snake_case ) + self._num_special_tokens
return token_id
def A__ ( self :Optional[Any] , __snake_case :int ):
'''simple docstring'''
if index in self.special_tokens_decoder:
__magic_name__ : Any =self.special_tokens_decoder[index]
else:
__magic_name__ : int =chr(index - self._num_special_tokens )
return token
def A__ ( self :Union[str, Any] , __snake_case :int ):
'''simple docstring'''
__magic_name__ : Any =B""""""
for token in tokens:
if token in self.special_tokens_decoder:
__magic_name__ : int =self.special_tokens_decoder[token].encode("""utf-8""" )
elif token in self.added_tokens_decoder:
__magic_name__ : Union[str, Any] =self.special_tokens_decoder[token].encode("""utf-8""" )
elif token in self.special_tokens_encoder:
__magic_name__ : str =token.encode("""utf-8""" )
elif token in self.added_tokens_encoder:
__magic_name__ : Optional[int] =token.encode("""utf-8""" )
else:
__magic_name__ : Tuple =bytes([ord(__snake_case )] )
bstring += tok_string
__magic_name__ : str =bstring.decode("""utf-8""" , errors="""ignore""" )
return string
def A__ ( self :Tuple , __snake_case :str , __snake_case :Optional[str] = None ):
'''simple docstring'''
return ()
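# A minimal sketch of the byte-level round trip implemented above: tokens are the
# UTF-8 bytes of the text and ids are offset by the number of special tokens
# (3 here for pad/eos/unk, matching the encoder built in __init__; extra ids are
# ignored for simplicity).
def _sketch_byte_roundtrip(text, num_special_tokens=3):
    tokens = [chr(i) for i in text.encode("utf-8")]
    ids = [ord(t) + num_special_tokens for t in tokens]
    decoded = bytes(i - num_special_tokens for i in ids).decode("utf-8", errors="ignore")
    return ids, decoded
# _sketch_byte_roundtrip("héllo")[1] -> 'héllo'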
| 21 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __A ( unittest.TestCase ):
def A__ ( self :Tuple ):
'''simple docstring'''
debug_launcher(test_script.main )
def A__ ( self :Dict ):
'''simple docstring'''
debug_launcher(test_ops.main )
| 21 | 1 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
UpperCAmelCase_ : Dict = random.Random()
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=1.0 , lowerCamelCase=None , lowerCamelCase=None ):
if rng is None:
__magic_name__ : Dict =global_rng
__magic_name__ : int =[]
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class __A ( unittest.TestCase ):
def __init__( self :Tuple , __snake_case :List[str] , __snake_case :Union[str, Any]=7 , __snake_case :int=4_00 , __snake_case :Dict=20_00 , __snake_case :Optional[int]=10 , __snake_case :int=1_60 , __snake_case :Union[str, Any]=8 , __snake_case :Any=0.0 , __snake_case :str=40_00 , __snake_case :Dict=False , __snake_case :Optional[Any]=True , ):
'''simple docstring'''
__magic_name__ : Tuple =parent
__magic_name__ : Optional[Any] =batch_size
__magic_name__ : Optional[int] =min_seq_length
__magic_name__ : Optional[int] =max_seq_length
__magic_name__ : int =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__magic_name__ : int =padding_value
__magic_name__ : Any =sampling_rate
__magic_name__ : Optional[Any] =return_attention_mask
__magic_name__ : List[str] =do_normalize
__magic_name__ : str =feature_size
__magic_name__ : Optional[int] =chunk_length
__magic_name__ : Tuple =hop_length
def A__ ( self :Any ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def A__ ( self :str , __snake_case :Dict=False , __snake_case :Any=False ):
'''simple docstring'''
def _flatten(__snake_case :List[str] ):
return list(itertools.chain(*__snake_case ) )
if equal_length:
__magic_name__ : Tuple =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__magic_name__ : List[Any] =[
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__magic_name__ : Optional[Any] =[np.asarray(__snake_case ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __A ( UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = WhisperFeatureExtractor if is_speech_available() else None
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ : Optional[Any] =WhisperFeatureExtractionTester(self )
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : Optional[Any] =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__magic_name__ : int =feat_extract_first.save_pretrained(__snake_case )[0]
check_json_file_has_correct_format(__snake_case )
__magic_name__ : Dict =self.feature_extraction_class.from_pretrained(__snake_case )
__magic_name__ : str =feat_extract_first.to_dict()
__magic_name__ : Union[str, Any] =feat_extract_second.to_dict()
__magic_name__ : int =feat_extract_first.mel_filters
__magic_name__ : List[Any] =feat_extract_second.mel_filters
self.assertTrue(np.allclose(__snake_case , __snake_case ) )
self.assertEqual(__snake_case , __snake_case )
def A__ ( self :str ):
'''simple docstring'''
__magic_name__ : Optional[Any] =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__magic_name__ : str =os.path.join(__snake_case , """feat_extract.json""" )
feat_extract_first.to_json_file(__snake_case )
__magic_name__ : Tuple =self.feature_extraction_class.from_json_file(__snake_case )
__magic_name__ : str =feat_extract_first.to_dict()
__magic_name__ : Union[str, Any] =feat_extract_second.to_dict()
__magic_name__ : Dict =feat_extract_first.mel_filters
__magic_name__ : List[str] =feat_extract_second.mel_filters
self.assertTrue(np.allclose(__snake_case , __snake_case ) )
self.assertEqual(__snake_case , __snake_case )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Any =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__magic_name__ : Dict =[floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
__magic_name__ : List[str] =[np.asarray(__snake_case ) for speech_input in speech_inputs]
# Test feature size
__magic_name__ : Dict =feature_extractor(__snake_case , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
__magic_name__ : Optional[int] =feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
__magic_name__ : int =feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(__snake_case , __snake_case , atol=1E-3 ) )
# Test batched
__magic_name__ : Union[str, Any] =feature_extractor(__snake_case , return_tensors="""np""" ).input_features
__magic_name__ : int =feature_extractor(__snake_case , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(__snake_case , __snake_case ):
self.assertTrue(np.allclose(__snake_case , __snake_case , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__magic_name__ : Optional[Any] =[floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
__magic_name__ : Tuple =np.asarray(__snake_case )
__magic_name__ : List[str] =feature_extractor(__snake_case , return_tensors="""np""" ).input_features
__magic_name__ : Dict =feature_extractor(__snake_case , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(__snake_case , __snake_case ):
self.assertTrue(np.allclose(__snake_case , __snake_case , atol=1E-3 ) )
# Test truncation required
__magic_name__ : Any =[floats_list((1, x) )[0] for x in range(2_00 , (feature_extractor.n_samples + 5_00) , 2_00 )]
__magic_name__ : int =[np.asarray(__snake_case ) for speech_input in speech_inputs]
__magic_name__ : List[Any] =[x[: feature_extractor.n_samples] for x in speech_inputs]
__magic_name__ : str =[np.asarray(__snake_case ) for speech_input in speech_inputs_truncated]
__magic_name__ : Optional[Any] =feature_extractor(__snake_case , return_tensors="""np""" ).input_features
__magic_name__ : int =feature_extractor(__snake_case , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(__snake_case , __snake_case ):
self.assertTrue(np.allclose(__snake_case , __snake_case , atol=1E-3 ) )
def A__ ( self :Any ):
'''simple docstring'''
import torch
__magic_name__ : List[str] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__magic_name__ : str =np.random.rand(1_00 , 32 ).astype(np.floataa )
__magic_name__ : int =np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__magic_name__ : Dict =feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__magic_name__ : List[str] =feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def A__ ( self :Optional[int] , __snake_case :Tuple ):
'''simple docstring'''
__magic_name__ : str =load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
__magic_name__ : Any =ds.sort("""id""" ).select(range(__snake_case ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def A__ ( self :Any ):
'''simple docstring'''
        # fmt: off
        __magic_name__ : int =torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
__magic_name__ : List[Any] =self._load_datasamples(1 )
__magic_name__ : int =WhisperFeatureExtractor()
__magic_name__ : Optional[Any] =feature_extractor(__snake_case , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 80, 30_00) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , __snake_case , atol=1E-4 ) )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__magic_name__ : Tuple =self._load_datasamples(1 )[0]
__magic_name__ : List[str] =((audio - audio.min()) / (audio.max() - audio.min())) * 6_55_35 # Rescale to [0, 65535] to show issue
__magic_name__ : List[str] =feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__snake_case )[0]
self.assertTrue(np.all(np.mean(__snake_case ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(__snake_case ) - 1 ) < 1E-3 ) )
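# A minimal numpy sketch of the zero-mean/unit-variance normalization the last
# test exercises; the 1e-7 floor is an assumption for numerical safety, not
# necessarily the constant used inside the feature extractor.
def _sketch_zero_mean_unit_var(x):
    x = np.asarray(x, dtype=np.float64)
    return (x - x.mean()) / np.sqrt(x.var() + 1e-7)
# After normalization, mean ~ 0 and variance ~ 1, which is what the assertions check.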
| 21 |
UpperCAmelCase_ : Tuple = 0 # The first color of the flag.
UpperCAmelCase_ : Any = 1 # The second color of the flag.
UpperCAmelCase_ : str = 2 # The third color of the flag.
UpperCAmelCase_ : Tuple = (red, white, blue)
def lowerCAmelCase_ ( lowerCamelCase ):
if not sequence:
return []
if len(lowerCamelCase ) == 1:
return list(lowerCamelCase )
__magic_name__ : int =0
__magic_name__ : str =len(lowerCamelCase ) - 1
__magic_name__ : Optional[Any] =0
while mid <= high:
if sequence[mid] == colors[0]:
__magic_name__ , __magic_name__ : Tuple =sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
__magic_name__ , __magic_name__ : Optional[Any] =sequence[high], sequence[mid]
high -= 1
else:
__magic_name__ : Optional[int] =F"The elements inside the sequence must contains only {colors} values"
raise ValueError(lowerCamelCase )
return sequence
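# Worked example of the single-pass partition above:
# dutch_national_flag_sort([2, 0, 1, 2, 0]) -> [0, 0, 1, 2, 2]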
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : List[Any] = input("Enter numbers separated by commas:\n").strip()
UpperCAmelCase_ : Optional[int] = [int(item.strip()) for item in user_input.split(",")]
print(F"""{dutch_national_flag_sort(unsorted)}""")
| 21 | 1 |
class __A :
def __init__( self :Any , __snake_case :str = "" , __snake_case :bool = False ):
'''simple docstring'''
__magic_name__ : dict[str, RadixNode] ={}
# A node will be a leaf if the tree contains its word
__magic_name__ : Tuple =is_leaf
__magic_name__ : Optional[Any] =prefix
def A__ ( self :Tuple , __snake_case :str ):
'''simple docstring'''
__magic_name__ : Optional[Any] =0
for q, w in zip(self.prefix , __snake_case ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def A__ ( self :Optional[int] , __snake_case :list[str] ):
'''simple docstring'''
for word in words:
self.insert(__snake_case )
def A__ ( self :List[Any] , __snake_case :str ):
'''simple docstring'''
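        # Case 1: The word is exactly this node's prefix
        # Solution: We mark the current node as a leaf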
if self.prefix == word:
__magic_name__ : str =True
        # Case 2: The node has no edge that shares a first character with the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
__magic_name__ : Optional[int] =RadixNode(prefix=__snake_case , is_leaf=__snake_case )
else:
__magic_name__ : Union[str, Any] =self.nodes[word[0]]
__magic_name__ , __magic_name__ , __magic_name__ : Optional[int] =incoming_node.match(
__snake_case )
            # Case 3: The node prefix is equal to the matching string
            # Solution: We insert the remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(__snake_case )
            # Case 4: The node prefix extends past the matching string
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
__magic_name__ : Tuple =remaining_prefix
__magic_name__ : Optional[Any] =self.nodes[matching_string[0]]
__magic_name__ : Optional[Any] =RadixNode(__snake_case , __snake_case )
__magic_name__ : int =aux_node
if remaining_word == "":
__magic_name__ : int =True
else:
self.nodes[matching_string[0]].insert(__snake_case )
def A__ ( self :Any , __snake_case :str ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =self.nodes.get(word[0] , __snake_case )
if not incoming_node:
return False
else:
__magic_name__ , __magic_name__ , __magic_name__ : Dict =incoming_node.match(
__snake_case )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(__snake_case )
def A__ ( self :Optional[Any] , __snake_case :str ):
'''simple docstring'''
__magic_name__ : Tuple =self.nodes.get(word[0] , __snake_case )
if not incoming_node:
return False
else:
__magic_name__ , __magic_name__ , __magic_name__ : int =incoming_node.match(
__snake_case )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(__snake_case )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
__magic_name__ : List[Any] =list(self.nodes.values() )[0]
__magic_name__ : List[str] =merging_node.is_leaf
self.prefix += merging_node.prefix
__magic_name__ : List[str] =merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
__magic_name__ : str =False
# If there is 1 edge, we merge it with its child
else:
__magic_name__ : Optional[Any] =list(incoming_node.nodes.values() )[0]
__magic_name__ : List[Any] =merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
__magic_name__ : int =merging_node.nodes
return True
def A__ ( self :Tuple , __snake_case :int = 0 ):
'''simple docstring'''
if self.prefix != "":
print("""-""" * height , self.prefix , """ (leaf)""" if self.is_leaf else """""" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def lowerCAmelCase_ ( ):
__magic_name__ : List[Any] ="""banana bananas bandana band apple all beast""".split()
__magic_name__ : Dict =RadixNode()
root.insert_many(lowerCamelCase )
assert all(root.find(lowerCamelCase ) for word in words )
assert not root.find("""bandanas""" )
assert not root.find("""apps""" )
root.delete("""all""" )
assert not root.find("""all""" )
root.delete("""banana""" )
assert not root.find("""banana""" )
assert root.find("""bananas""" )
return True
def lowerCAmelCase_ ( ):
assert test_trie()
def lowerCAmelCase_ ( ):
__magic_name__ : Optional[int] =RadixNode()
__magic_name__ : List[str] ="""banana bananas bandanas bandana band apple all beast""".split()
root.insert_many(lowerCamelCase )
print("""Words:""" , lowerCamelCase )
print("""Tree:""" )
root.print_tree()
if __name__ == "__main__":
main()
| 21 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class __A ( UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase = 1
@register_to_config
def __init__( self :Any , __snake_case :Tuple=20_00 , __snake_case :Optional[Any]=0.1 , __snake_case :Any=20 , __snake_case :Optional[int]=1E-3 ):
'''simple docstring'''
__magic_name__ : Dict =None
__magic_name__ : List[str] =None
__magic_name__ : str =None
def A__ ( self :Dict , __snake_case :Optional[int] , __snake_case :Union[str, torch.device] = None ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =torch.linspace(1 , self.config.sampling_eps , __snake_case , device=__snake_case )
def A__ ( self :List[str] , __snake_case :List[str] , __snake_case :int , __snake_case :int , __snake_case :List[str]=None ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
__magic_name__ : int =(
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
__magic_name__ : Optional[int] =torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
__magic_name__ : str =std.flatten()
while len(std.shape ) < len(score.shape ):
__magic_name__ : List[str] =std.unsqueeze(-1 )
__magic_name__ : Union[str, Any] =-score / std
# compute
__magic_name__ : Tuple =-1.0 / len(self.timesteps )
__magic_name__ : int =self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
__magic_name__ : Dict =beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
__magic_name__ : Any =beta_t.unsqueeze(-1 )
__magic_name__ : Dict =-0.5 * beta_t * x
__magic_name__ : Optional[int] =torch.sqrt(__snake_case )
__magic_name__ : int =drift - diffusion**2 * score
__magic_name__ : List[str] =x + drift * dt
# add noise
__magic_name__ : Optional[int] =randn_tensor(x.shape , layout=x.layout , generator=__snake_case , device=x.device , dtype=x.dtype )
__magic_name__ : Optional[Any] =x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self :List[Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
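# A self-contained sketch of the single Euler–Maruyama update performed in the
# step method above (beta values are illustrative; `score` stands for the
# already-rescaled model output, and dt is negative, as in the scheduler, so
# math.sqrt(-dt) is well defined).
def _sketch_vp_sde_step(x, score, t, dt, beta_min=0.1, beta_max=20.0, generator=None):
    beta_t = beta_min + t * (beta_max - beta_min)
    drift = -0.5 * beta_t * x
    diffusion = math.sqrt(beta_t)
    drift = drift - diffusion**2 * score  # reverse-time drift correction
    x_mean = x + drift * dt
    noise = torch.randn(x.shape, generator=generator, dtype=x.dtype)
    return x_mean + diffusion * math.sqrt(-dt) * noise, x_mean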
| 21 | 1 |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
UpperCAmelCase_ : List[str] = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
UpperCAmelCase_ : Union[str, Any] = dataset.iloc[:, 1:2].values
UpperCAmelCase_ : Dict = dataset.iloc[:, 2].values
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = train_test_split(X, y, test_size=0.2, random_state=0)
UpperCAmelCase_ : Union[str, Any] = PolynomialFeatures(degree=4)
UpperCAmelCase_ : Any = poly_reg.fit_transform(X)
UpperCAmelCase_ : Optional[Any] = LinearRegression()
pol_reg.fit(X_poly, y)
def lowerCAmelCase_ ( ):
plt.scatter(lowerCamelCase , lowerCamelCase , color="""red""" )
plt.plot(lowerCamelCase , pol_reg.predict(poly_reg.fit_transform(lowerCamelCase ) ) , color="""blue""" )
    plt.title("""Truth or Bluff (Polynomial Regression)""" )
plt.xlabel("""Position level""" )
plt.ylabel("""Salary""" )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
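# A quick numpy cross-check of the degree-4 fit above: np.polyfit/np.polyval fit
# the same polynomial as PolynomialFeatures + LinearRegression on 1-D inputs, so
# the prediction should be close (up to numerical conditioning).
import numpy as np
coeffs = np.polyfit(X.ravel(), y, deg=4)
print(np.polyval(coeffs, 5.5))  # should be close to the sklearn prediction above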
| 21 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
def __init__( self :List[str] , __snake_case :int , __snake_case :int , __snake_case :float , **__snake_case :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[Any] =feature_size
__magic_name__ : Union[str, Any] =sampling_rate
__magic_name__ : List[Any] =padding_value
__magic_name__ : List[str] =kwargs.pop("""padding_side""" , """right""" )
__magic_name__ : Tuple =kwargs.pop("""return_attention_mask""" , __snake_case )
super().__init__(**__snake_case )
def A__ ( self :Any , __snake_case :Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , __snake_case :Union[bool, str, PaddingStrategy] = True , __snake_case :Optional[int] = None , __snake_case :bool = False , __snake_case :Optional[int] = None , __snake_case :Optional[bool] = None , __snake_case :Optional[Union[str, TensorType]] = None , ):
'''simple docstring'''
if isinstance(__snake_case , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__magic_name__ : Union[str, Any] ={
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
f" to this method that includes {self.model_input_names[0]}, but you provided"
f" {list(processed_features.keys() )}" )
__magic_name__ : int =processed_features[self.model_input_names[0]]
__magic_name__ : Union[str, Any] =(
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(__snake_case ) == 0:
if return_attention_mask:
__magic_name__ : List[str] =[]
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__magic_name__ : Optional[int] =required_input[0]
if isinstance(__snake_case , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
__magic_name__ : Optional[Any] =0
while len(required_input[index] ) == 0:
index += 1
if index < len(__snake_case ):
__magic_name__ : List[str] =required_input[index][0]
if return_tensors is None:
if is_tf_tensor(__snake_case ):
__magic_name__ : int ="""tf"""
elif is_torch_tensor(__snake_case ):
__magic_name__ : str ="""pt"""
elif isinstance(__snake_case , (int, float, list, tuple, np.ndarray) ):
__magic_name__ : List[Any] ="""np"""
else:
raise ValueError(
f"type of {first_element} unknown: {type(__snake_case )}. "
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__magic_name__ : List[str] =to_numpy(__snake_case )
else:
__magic_name__ : str =[to_numpy(__snake_case ) for v in value]
# Convert padding_strategy in PaddingStrategy
__magic_name__ : Dict =self._get_padding_strategies(padding=__snake_case , max_length=__snake_case )
__magic_name__ : Optional[Any] =processed_features[self.model_input_names[0]]
__magic_name__ : Dict =len(__snake_case )
if not all(len(__snake_case ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
__magic_name__ : Optional[int] =[]
for i in range(__snake_case ):
__magic_name__ : Any ={k: v[i] for k, v in processed_features.items()}
# truncation
__magic_name__ : List[str] =self._truncate(
__snake_case , max_length=__snake_case , pad_to_multiple_of=__snake_case , truncation=__snake_case , )
truncated_inputs.append(__snake_case )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__magic_name__ : Optional[int] =max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__magic_name__ : Tuple =PaddingStrategy.MAX_LENGTH
__magic_name__ : str ={}
for i in range(__snake_case ):
# padding
__magic_name__ : List[str] =self._pad(
truncated_inputs[i] , max_length=__snake_case , padding_strategy=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , )
for key, value in outputs.items():
if key not in batch_outputs:
__magic_name__ : Dict =[]
if value.dtype is np.dtype(np.floataa ):
__magic_name__ : Optional[int] =value.astype(np.floataa )
batch_outputs[key].append(__snake_case )
return BatchFeature(__snake_case , tensor_type=__snake_case )
def A__ ( self :Any , __snake_case :Union[Dict[str, np.ndarray], BatchFeature] , __snake_case :Optional[int] = None , __snake_case :PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __snake_case :Optional[int] = None , __snake_case :Optional[bool] = None , ):
'''simple docstring'''
__magic_name__ : Dict =processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__magic_name__ : Any =len(__snake_case )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__magic_name__ : Dict =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__magic_name__ : List[Any] =padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__snake_case ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__magic_name__ : int =np.ones(len(__snake_case ) , dtype=np.intaa )
if needs_to_be_padded:
__magic_name__ : List[Any] =max_length - len(__snake_case )
if self.padding_side == "right":
if return_attention_mask:
__magic_name__ : str =np.pad(
processed_features["""attention_mask"""] , (0, difference) )
__magic_name__ : Tuple =((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__magic_name__ : str =np.pad(
__snake_case , __snake_case , """constant""" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__magic_name__ : str =np.pad(
processed_features["""attention_mask"""] , (difference, 0) )
__magic_name__ : Optional[int] =((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__magic_name__ : List[Any] =np.pad(
__snake_case , __snake_case , """constant""" , constant_values=self.padding_value )
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return processed_features
def A__ ( self :Optional[Any] , __snake_case :Union[Dict[str, np.ndarray], BatchFeature] , __snake_case :Optional[int] = None , __snake_case :Optional[int] = None , __snake_case :Optional[bool] = None , ):
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
__magic_name__ : Union[str, Any] =processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__magic_name__ : List[str] =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__magic_name__ : Any =len(__snake_case ) > max_length
if needs_to_be_truncated:
__magic_name__ : List[Any] =processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__magic_name__ : List[str] =processed_features["""attention_mask"""][:max_length]
return processed_features
def A__ ( self :List[Any] , __snake_case :str=False , __snake_case :Optional[int]=None ):
'''simple docstring'''
if padding is not False:
if padding is True:
__magic_name__ : Union[str, Any] =PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(__snake_case , __snake_case ):
__magic_name__ : Optional[int] =PaddingStrategy(__snake_case )
elif isinstance(__snake_case , __snake_case ):
__magic_name__ : Any =padding
else:
__magic_name__ : Any =PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
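# A minimal numpy sketch of the right-padding branch of `_pad` above for a 1-D
# feature sequence: pad values up to `max_length` with `padding_value` and extend
# the attention mask with zeros. Shapes and values are illustrative.
def _sketch_pad_right(values, max_length, padding_value=0.0):
    values = np.asarray(values, dtype=np.float32)
    attention_mask = np.ones(len(values), dtype=np.int32)
    difference = max_length - len(values)
    if difference > 0:
        attention_mask = np.pad(attention_mask, (0, difference))
        values = np.pad(values, (0, difference), "constant", constant_values=padding_value)
    return values, attention_mask
# _sketch_pad_right([1.0, 2.0], 4) -> (array([1., 2., 0., 0.]), array([1, 1, 0, 0]))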
| 21 | 1 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ , __magic_name__ : Union[str, Any] =analyze_text(lowerCamelCase )
__magic_name__ : List[Any] =list(""" """ + ascii_lowercase )
    # total count over all single characters (the normalizer for probabilities).
    __magic_name__ : Union[str, Any] =sum(single_char_strings.values() )
    # entropy accumulator for one-character strings
    __magic_name__ : Optional[int] =0
    # for each letter, add its entropy contribution if it occurs in the text
for ch in my_alphas:
if ch in single_char_strings:
__magic_name__ : str =single_char_strings[ch]
__magic_name__ : Dict =my_str / all_sum
my_fir_sum += prob * math.loga(lowerCamelCase ) # entropy formula.
# print entropy
print(F"{round(-1 * my_fir_sum ):.1f}" )
    # total count over all two-character sequences (the normalizer for probabilities).
    __magic_name__ : Optional[Any] =sum(two_char_strings.values() )
    __magic_name__ : int =0
    # for each two-letter sequence, add its entropy contribution if it occurs in the text
for cha in my_alphas:
for cha in my_alphas:
__magic_name__ : List[str] =cha + cha
if sequence in two_char_strings:
__magic_name__ : str =two_char_strings[sequence]
__magic_name__ : Union[str, Any] =int(lowerCamelCase ) / all_sum
my_sec_sum += prob * math.loga(lowerCamelCase )
# print second entropy
print(F"{round(-1 * my_sec_sum ):.1f}" )
# print the difference between them
print(F"{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}" )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Tuple =Counter() # type: ignore
__magic_name__ : int =Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(lowerCamelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def lowerCAmelCase_ ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
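# A stand-alone sketch of the first-order entropy computed above:
# H = -sum(p * log2(p)) over single-character frequencies.
def _sketch_char_entropy(text):
    counts = Counter(text)
    total = sum(counts.values())
    return -sum((c / total) * math.log2(c / total) for c in counts.values())
# _sketch_char_entropy("aabb") -> 1.0 (two equiprobable symbols)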
| 21 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class __A ( nn.Module ):
def __init__( self :List[Any] ):
'''simple docstring'''
super().__init__()
__magic_name__ : Tuple =nn.Linear(3 , 4 )
__magic_name__ : Union[str, Any] =nn.BatchNormad(4 )
__magic_name__ : List[str] =nn.Linear(4 , 5 )
def A__ ( self :Dict , __snake_case :Tuple ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(__snake_case ) ) )
class __A ( UpperCamelCase__ ):
def A__ ( self :Any , __snake_case :Optional[Any] , *__snake_case :List[Any] , **__snake_case :Any ):
'''simple docstring'''
return (args[0] + 1,) + args[1:], kwargs
class __A ( UpperCamelCase__ ):
def A__ ( self :List[str] , __snake_case :Tuple , __snake_case :Union[str, Any] ):
'''simple docstring'''
return output + 1
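# A minimal usage sketch for the hooks defined above, assuming accelerate's public
# add_hook_to_module / remove_hook_from_module API imported at the top of this file.
def _sketch_hook_usage():
    linear = nn.Linear(3, 4)
    add_hook_to_module(linear, PreForwardHook())  # inputs get +1 before forward
    out = linear(torch.randn(2, 3))
    remove_hook_from_module(linear)  # restores the original forward
    return out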
class __A ( unittest.TestCase ):
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : int =ModelForTest()
__magic_name__ : Tuple =ModelHook()
add_hook_to_module(__snake_case , __snake_case )
self.assertEqual(test_model._hf_hook , __snake_case )
self.assertTrue(hasattr(__snake_case , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__snake_case )
self.assertFalse(hasattr(__snake_case , """_hf_hook""" ) )
self.assertFalse(hasattr(__snake_case , """_old_forward""" ) )
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ : int =ModelForTest()
__magic_name__ : List[str] =ModelHook()
add_hook_to_module(__snake_case , __snake_case )
add_hook_to_module(__snake_case , __snake_case , append=__snake_case )
self.assertEqual(isinstance(test_model._hf_hook , __snake_case ) , __snake_case )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__snake_case , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__snake_case )
self.assertFalse(hasattr(__snake_case , """_hf_hook""" ) )
self.assertFalse(hasattr(__snake_case , """_old_forward""" ) )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Any =ModelForTest()
__magic_name__ : Any =torch.randn(2 , 3 )
__magic_name__ : Any =test_model(x + 1 )
__magic_name__ : Optional[Any] =test_model(x + 2 )
__magic_name__ : int =PreForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : int =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-5 ) )
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
__magic_name__ : str =PreForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : List[str] =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__magic_name__ : Optional[Any] =SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Any =test_model(__snake_case )
assert torch.allclose(__snake_case , __snake_case , atol=1E-5 )
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Optional[Any] =ModelForTest()
__magic_name__ : Dict =torch.randn(2 , 3 )
__magic_name__ : Any =test_model(__snake_case )
__magic_name__ : Dict =PostForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Any =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , output + 1 , atol=1E-5 ) )
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
__magic_name__ : Union[str, Any] =PostForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Optional[int] =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__magic_name__ : Union[str, Any] =SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Union[str, Any] =test_model(__snake_case )
assert torch.allclose(__snake_case , output + 2 , atol=1E-5 )
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ : Tuple =ModelForTest()
__magic_name__ : int =torch.randn(2 , 3 )
__magic_name__ : Union[str, Any] =test_model(__snake_case )
__magic_name__ : Union[str, Any] =PostForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Dict =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__magic_name__ : Any =True
__magic_name__ : Any =test_model(__snake_case )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Optional[Any] =ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__magic_name__ : List[Any] =torch.randn(2 , 3 )
__magic_name__ : Optional[Any] =model(__snake_case )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__snake_case , AlignDevicesHook(io_same_device=__snake_case ) )
__magic_name__ : int =torch.randn(2 , 3 ).to(0 )
__magic_name__ : Optional[int] =model(__snake_case )
self.assertEqual(output.device , torch.device(0 ) )
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : int =ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__magic_name__ : int ={"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__magic_name__ : Optional[int] =torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __snake_case )
__magic_name__ : Union[str, Any] =torch.randn(2 , 3 )
__magic_name__ : Optional[int] =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
__magic_name__ : Tuple ={
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__magic_name__ : Tuple =torch.randn(2 , 3 )
__magic_name__ : int =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : Any =ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule to a different device
__magic_name__ : str =0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__snake_case , execution_device=__snake_case , offload=__snake_case )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__magic_name__ : Optional[Any] =torch.device(__snake_case )
self.assertEqual(model.batchnorm.running_mean.device , __snake_case )
__magic_name__ : List[Any] =torch.randn(2 , 3 )
__magic_name__ : str =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing the hooks loads the weights back into the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__snake_case , execution_device=__snake_case , offload=__snake_case , offload_buffers=__snake_case )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__magic_name__ : Optional[int] =torch.randn(2 , 3 )
__magic_name__ : Union[str, Any] =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing the hooks loads the weights back into the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Dict =ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule to a different device
__magic_name__ : List[str] =0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__snake_case , execution_device=__snake_case , offload=__snake_case , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__magic_name__ : Optional[Any] =torch.device(__snake_case )
self.assertEqual(model.batchnorm.running_mean.device , __snake_case )
__magic_name__ : int =torch.randn(2 , 3 )
__magic_name__ : Any =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing the hooks loads the weights back into the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__snake_case , execution_device=__snake_case , offload=__snake_case , weights_map=model.state_dict() , offload_buffers=__snake_case , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__magic_name__ : List[Any] =torch.randn(2 , 3 )
__magic_name__ : str =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing the hooks loads the weights back into the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
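# A minimal usage sketch (illustrative only, not part of the tests above), assuming
# `accelerate` is installed and a CUDA device is available:
#
# from accelerate.hooks import attach_align_device_hook, remove_hook_from_submodules
# model = ModelForTest()
# attach_align_device_hook(model, execution_device=0, offload=True)
# output = model(torch.randn(2, 3))      # weights are streamed to device 0 at forward time
# remove_hook_from_submodules(model)     # weights are restored on the CPU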
| 21 | 1 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def lowerCAmelCase_ ( ):
__magic_name__ : List[str] ="""https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"""
__magic_name__ : Dict =Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw ).convert("""RGB""" )
return image
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : int =[]
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.embeddings.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.embeddings.layernorm.bias""") )
# fmt: on
return rename_keys
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : Optional[int] =dct.pop(lowerCamelCase )
__magic_name__ : int =val
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__magic_name__ : List[Any] =state_dict.pop(F"visual_encoder.blocks.{i}.attn.q_bias" )
__magic_name__ : Tuple =state_dict.pop(F"visual_encoder.blocks.{i}.attn.v_bias" )
# next, set the combined qkv bias in the state dict
__magic_name__ : Tuple =torch.cat((q_bias, torch.zeros_like(lowerCamelCase , requires_grad=lowerCamelCase ), v_bias) )
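# explanatory note: the key projection in this ViT has no bias by design, hence the
# block of zeros concatenated between the query and value biases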
__magic_name__ : Dict =qkv_bias
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : int =364 if """coco""" in model_name else 224
__magic_name__ : Union[str, Any] =InstructBlipVisionConfig(image_size=lowerCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
__magic_name__ : List[Any] =TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__magic_name__ : Dict =TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
__magic_name__ : Union[str, Any] =LlamaConfig.from_pretrained("""decapoda-research/llama-7b-hf""" , vocab_size=32001 ).to_dict()
elif "vicuna-13b" in model_name:
__magic_name__ : Union[str, Any] =LlamaConfig.from_pretrained("""decapoda-research/llama-13b-hf""" , vocab_size=32001 ).to_dict()
else:
raise ValueError("""Model name not supported""" )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
__magic_name__ : Dict =InstructBlipQFormerConfig(vocab_size=30523 ).to_dict()
__magic_name__ : Optional[Any] =InstructBlipConfig(vision_config=lowerCamelCase , text_config=lowerCamelCase , qformer_config=lowerCamelCase )
return config, image_size
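# Illustrative example only (values follow the branches above; the call mirrors the
# usage further below):
# config, image_size = get_blipa_config("instructblip-flan-t5-xl")  # -> image_size == 224
# any model name containing "coco" would instead yield image_size == 364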
@torch.no_grad()
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=None , lowerCamelCase=False ):
__magic_name__ : Dict =AutoTokenizer.from_pretrained("""bert-base-uncased""" , truncation_side="""left""" )
qformer_tokenizer.add_special_tokens({"""bos_token""": """[DEC]"""} )
if "t5" in model_name:
__magic_name__ : List[str] =TaTokenizerFast.from_pretrained("""google/flan-t5-xl""" , truncation_side="""left""" )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
__magic_name__ : Union[str, Any] =LlamaTokenizerFast.from_pretrained(
"""huggyllama/llama-7b""" , truncation_side="""left""" , bos_token="""</s>""" , unk_token="""</s>""" )
tokenizer.add_special_tokens({"""pad_token""": """[PAD]"""} )
__magic_name__ , __magic_name__ : Union[str, Any] =get_blipa_config(lowerCamelCase )
__magic_name__ : List[Any] =InstructBlipForConditionalGeneration(lowerCamelCase ).eval()
__magic_name__ : List[Any] ={
"""instructblip-vicuna-7b""": ("""blip2_vicuna_instruct""", """vicuna7b"""),
"""instructblip-vicuna-13b""": ("""blip2_vicuna_instruct""", """vicuna13b"""),
"""instructblip-flan-t5-xl""": ("""blip2_t5_instruct""", """flant5xl"""),
"""instructblip-flan-t5-xxl""": ("""blip2_t5_instruct""", """flant5xxl"""),
}
__magic_name__ , __magic_name__ : List[str] =model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
__magic_name__ : Union[str, Any] ="""cuda:1""" if torch.cuda.is_available() else """cpu"""
__magic_name__ : str ="""cuda:2""" if torch.cuda.is_available() else """cpu"""
__magic_name__ , __magic_name__ , __magic_name__ : Dict =load_model_and_preprocess(
name=lowerCamelCase , model_type=lowerCamelCase , is_eval=lowerCamelCase , device=lowerCamelCase )
original_model.eval()
print("""Done!""" )
# update state dict keys
__magic_name__ : List[str] =original_model.state_dict()
__magic_name__ : Dict =create_rename_keys(lowerCamelCase )
for src, dest in rename_keys:
rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# the remaining keys can be renamed with simple string replacements
for key, val in state_dict.copy().items():
__magic_name__ : Any =state_dict.pop(lowerCamelCase )
if key.startswith("""Qformer.bert""" ):
__magic_name__ : Dict =key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
__magic_name__ : Dict =key.replace("""self""" , """attention""" )
if "llm_proj" in key:
__magic_name__ : str =key.replace("""llm_proj""" , """language_projection""" )
if "t5_proj" in key:
__magic_name__ : int =key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""llm_model""" ):
__magic_name__ : Optional[int] =key.replace("""llm_model""" , """language_model""" )
if key.startswith("""t5""" ):
__magic_name__ : int =key.replace("""t5""" , """language""" )
__magic_name__ : Dict =val
# read in qv biases
read_in_q_v_bias(lowerCamelCase , lowerCamelCase )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
__magic_name__ : Tuple =load_demo_image()
__magic_name__ : Any ="""What is unusual about this image?"""
# create processor
__magic_name__ : int =BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=lowerCamelCase , image_std=lowerCamelCase )
__magic_name__ : Optional[int] =InstructBlipProcessor(
image_processor=lowerCamelCase , tokenizer=lowerCamelCase , qformer_tokenizer=lowerCamelCase , )
__magic_name__ : Union[str, Any] =processor(images=lowerCamelCase , text=lowerCamelCase , return_tensors="""pt""" ).to(lowerCamelCase )
# make sure processor creates exact same pixel values
__magic_name__ : Optional[int] =vis_processors["""eval"""](lowerCamelCase ).unsqueeze(0 ).to(lowerCamelCase )
__magic_name__ : int =inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , lowerCamelCase )
original_model.to(lowerCamelCase )
hf_model.to(lowerCamelCase )
with torch.no_grad():
if "vicuna" in model_name:
__magic_name__ : Dict =original_model({"""image""": original_pixel_values, """text_input""": [prompt]} ).logits
__magic_name__ : Tuple =hf_model(**lowerCamelCase ).logits
else:
__magic_name__ : int =original_model(
{"""image""": original_pixel_values, """text_input""": [prompt], """text_output""": ["""\n"""]} ).logits
__magic_name__ : Tuple =tokenizer("""\n""" , return_tensors="""pt""" ).input_ids.to(lowerCamelCase )
__magic_name__ : Union[str, Any] =label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
__magic_name__ : Tuple =hf_model(**lowerCamelCase , labels=lowerCamelCase ).logits
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
__magic_name__ : Optional[int] =1E-4 if """vicuna""" in model_name else 1E-5
assert torch.allclose(original_logits.to(logits.device ) , lowerCamelCase , atol=lowerCamelCase )
print("""Looks ok!""" )
print("""Generating with original model...""" )
__magic_name__ : Optional[int] =original_model.generate({"""image""": original_pixel_values, """prompt""": prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print("""Generating with HF model...""" )
__magic_name__ : List[Any] =hf_model.generate(
**lowerCamelCase , do_sample=lowerCamelCase , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
__magic_name__ : Optional[Any] =2
print("""Original generation:""" , lowerCamelCase )
__magic_name__ : int =processor.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )
__magic_name__ : Any =[text.strip() for text in output_text]
print("""HF generation:""" , lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowerCamelCase )
hf_model.save_pretrained(lowerCamelCase )
if push_to_hub:
processor.push_to_hub(F"Salesforce/{model_name}" )
hf_model.push_to_hub(F"Salesforce/{model_name}" )
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = argparse.ArgumentParser()
UpperCAmelCase_ : Dict = [
"instructblip-vicuna-7b",
"instructblip-vicuna-13b",
"instructblip-flan-t5-xl",
"instructblip-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="instructblip-flan-t5-xl",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 21 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = KandinskyInpaintPipeline
UpperCamelCase = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
UpperCamelCase = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
UpperCamelCase = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCamelCase = False
@property
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
return 32
@property
def A__ ( self :Optional[Any] ):
'''simple docstring'''
return 32
@property
def A__ ( self :List[Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def A__ ( self :Dict ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def A__ ( self :List[Any] ):
'''simple docstring'''
return 1_00
@property
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : Dict =XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def A__ ( self :str ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ : str =MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
__magic_name__ : Tuple =MultilingualCLIP(__snake_case )
__magic_name__ : Optional[int] =text_encoder.eval()
return text_encoder
@property
def A__ ( self :Dict ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ : Optional[Any] ={
"""in_channels""": 9,
# Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__magic_name__ : Union[str, Any] =UNetaDConditionModel(**__snake_case )
return model
@property
def A__ ( self :List[str] ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A__ ( self :Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ : Dict =VQModel(**self.dummy_movq_kwargs )
return model
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[str] =self.dummy_text_encoder
__magic_name__ : Optional[Any] =self.dummy_tokenizer
__magic_name__ : Optional[Any] =self.dummy_unet
__magic_name__ : Tuple =self.dummy_movq
__magic_name__ : List[str] =DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.00085 , beta_end=0.012 , clip_sample=__snake_case , set_alpha_to_one=__snake_case , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__snake_case , )
__magic_name__ : str ={
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def A__ ( self :str , __snake_case :Optional[Any] , __snake_case :int=0 ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__snake_case ) ).to(__snake_case )
__magic_name__ : Dict =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__snake_case )
# create init_image
__magic_name__ : str =floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
__magic_name__ : int =image.cpu().permute(0 , 2 , 3 , 1 )[0]
__magic_name__ : str =Image.fromarray(np.uinta(__snake_case ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create mask
__magic_name__ : Dict =np.ones((64, 64) , dtype=np.floataa )
__magic_name__ : Any =0
if str(__snake_case ).startswith("""mps""" ):
__magic_name__ : Dict =torch.manual_seed(__snake_case )
else:
__magic_name__ : Tuple =torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__magic_name__ : List[Any] ={
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Tuple ="""cpu"""
__magic_name__ : List[Any] =self.get_dummy_components()
__magic_name__ : Union[str, Any] =self.pipeline_class(**__snake_case )
__magic_name__ : Tuple =pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
__magic_name__ : Tuple =pipe(**self.get_dummy_inputs(__snake_case ) )
__magic_name__ : List[Any] =output.images
__magic_name__ : Any =pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
__magic_name__ : int =image[0, -3:, -3:, -1]
__magic_name__ : str =image_from_tuple[0, -3:, -3:, -1]
print(f"image.shape {image.shape}" )
assert image.shape == (1, 64, 64, 3)
__magic_name__ : Optional[Any] =np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def A__ ( self :Dict ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def A__ ( self :List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : List[str] =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
__magic_name__ : int =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__magic_name__ : List[Any] =np.ones((7_68, 7_68) , dtype=np.floataa )
__magic_name__ : Any =0
__magic_name__ : int ="""a hat"""
__magic_name__ : int =KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__snake_case )
__magic_name__ : Dict =KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
__magic_name__ : int =pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
__magic_name__ : Union[str, Any] =torch.Generator(device="""cpu""" ).manual_seed(0 )
__magic_name__ , __magic_name__ : Dict =pipe_prior(
__snake_case , generator=__snake_case , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
__magic_name__ : Optional[Any] =pipeline(
__snake_case , image=__snake_case , mask_image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , generator=__snake_case , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="""np""" , )
__magic_name__ : Optional[int] =output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
| 21 | 1 |
from pathlib import Path
import fire
from tqdm import tqdm
def lowerCAmelCase_ ( lowerCamelCase="ro" , lowerCamelCase="en" , lowerCamelCase="wmt16" , lowerCamelCase=None ):
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("""run pip install datasets""" )
__magic_name__ : Dict =F"{src_lang}-{tgt_lang}"
print(F"Converting {dataset}-{pair}" )
__magic_name__ : Dict =datasets.load_dataset(lowerCamelCase , lowerCamelCase )
if save_dir is None:
__magic_name__ : Optional[int] =F"{dataset}-{pair}"
__magic_name__ : int =Path(lowerCamelCase )
save_dir.mkdir(exist_ok=lowerCamelCase )
for split in ds.keys():
print(F"Splitting {split} with {ds[split].num_rows} records" )
# save the validation split as val.source / val.target, matching the summarization datasets
__magic_name__ : Dict ="""val""" if split == """validation""" else split
__magic_name__ : List[Any] =save_dir.joinpath(F"{fn}.source" )
__magic_name__ : Optional[int] =save_dir.joinpath(F"{fn}.target" )
__magic_name__ : Optional[Any] =src_path.open("""w+""" )
__magic_name__ : List[Any] =tgt_path.open("""w+""" )
# the reader is the bottleneck, so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__magic_name__ : str =x["""translation"""]
src_fp.write(ex[src_lang] + """\n""" )
tgt_fp.write(ex[tgt_lang] + """\n""" )
print(F"Saved {dataset} dataset to {save_dir}" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 21 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __A :
def __init__( self :int , __snake_case :List[Any] , __snake_case :List[Any]=2 , __snake_case :Dict=True , __snake_case :Tuple=False , __snake_case :List[str]=10 , __snake_case :List[str]=3 , __snake_case :Union[str, Any]=32 * 8 , __snake_case :Optional[int]=32 * 8 , __snake_case :Any=4 , __snake_case :Union[str, Any]=64 , ):
'''simple docstring'''
__magic_name__ : Optional[int] =parent
__magic_name__ : List[Any] =batch_size
__magic_name__ : List[str] =is_training
__magic_name__ : List[str] =use_auxiliary_loss
__magic_name__ : Union[str, Any] =num_queries
__magic_name__ : str =num_channels
__magic_name__ : Union[str, Any] =min_size
__magic_name__ : Union[str, Any] =max_size
__magic_name__ : Optional[int] =num_labels
__magic_name__ : Tuple =hidden_dim
__magic_name__ : Any =hidden_dim
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Optional[Any] =floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__snake_case )
__magic_name__ : List[Any] =torch.ones([self.batch_size, self.min_size, self.max_size] , device=__snake_case )
__magic_name__ : List[str] =(
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__snake_case ) > 0.5
).float()
__magic_name__ : Union[str, Any] =(torch.rand((self.batch_size, self.num_labels) , device=__snake_case ) > 0.5).long()
__magic_name__ : str =self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Dict =MaskaFormerConfig(
hidden_size=self.hidden_dim , )
__magic_name__ : str =self.num_queries
__magic_name__ : Dict =self.num_labels
__magic_name__ : int =[1, 1, 1, 1]
__magic_name__ : List[str] =self.num_channels
__magic_name__ : str =64
__magic_name__ : List[str] =1_28
__magic_name__ : Optional[Any] =self.hidden_dim
__magic_name__ : Tuple =self.hidden_dim
__magic_name__ : Optional[int] =self.hidden_dim
return config
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Tuple =self.prepare_config_and_inputs()
__magic_name__ : Optional[Any] ={"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def A__ ( self :Union[str, Any] , __snake_case :Tuple , __snake_case :Dict ):
'''simple docstring'''
__magic_name__ : int =output.encoder_hidden_states
__magic_name__ : List[str] =output.pixel_decoder_hidden_states
__magic_name__ : int =output.transformer_decoder_hidden_states
self.parent.assertEqual(len(__snake_case ) , len(config.backbone_config.depths ) )
self.parent.assertEqual(len(__snake_case ) , len(config.backbone_config.depths ) )
self.parent.assertEqual(len(__snake_case ) , config.decoder_layers )
def A__ ( self :List[Any] , __snake_case :Optional[Any] , __snake_case :int , __snake_case :str , __snake_case :str=False ):
'''simple docstring'''
with torch.no_grad():
__magic_name__ : List[str] =MaskaFormerModel(config=__snake_case )
model.to(__snake_case )
model.eval()
__magic_name__ : Union[str, Any] =model(pixel_values=__snake_case , pixel_mask=__snake_case )
__magic_name__ : int =model(__snake_case , output_hidden_states=__snake_case )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__snake_case , __snake_case )
def A__ ( self :Optional[Any] , __snake_case :List[str] , __snake_case :List[Any] , __snake_case :int , __snake_case :Any , __snake_case :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : str =MaskaFormerForUniversalSegmentation(config=__snake_case )
model.to(__snake_case )
model.eval()
def comm_check_on_output(__snake_case :List[str] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__magic_name__ : int =model(pixel_values=__snake_case , pixel_mask=__snake_case )
__magic_name__ : List[str] =model(__snake_case )
comm_check_on_output(__snake_case )
__magic_name__ : Any =model(
pixel_values=__snake_case , pixel_mask=__snake_case , mask_labels=__snake_case , class_labels=__snake_case )
comm_check_on_output(__snake_case )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
UpperCamelCase = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def A__ ( self :str ):
'''simple docstring'''
__magic_name__ : Any =MaskaFormerModelTester(self )
__magic_name__ : Union[str, Any] =ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case )
def A__ ( self :Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ , __magic_name__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__snake_case , **__snake_case , output_hidden_states=__snake_case )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__snake_case )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def A__ ( self :List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def A__ ( self :Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def A__ ( self :int ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def A__ ( self :Tuple ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
pass
def A__ ( self :Optional[int] ):
'''simple docstring'''
__magic_name__ , __magic_name__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Tuple =model_class(__snake_case )
__magic_name__ : Optional[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Tuple =[*signature.parameters.keys()]
__magic_name__ : Optional[Any] =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __snake_case )
@slow
def A__ ( self :Tuple ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
__magic_name__ : int =MaskaFormerModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Any =(self.model_tester.min_size,) * 2
__magic_name__ : Union[str, Any] ={
"""pixel_values""": torch.randn((2, 3, *size) , device=__snake_case ),
"""mask_labels""": torch.randn((2, 10, *size) , device=__snake_case ),
"""class_labels""": torch.zeros(2 , 10 , device=__snake_case ).long(),
}
__magic_name__ : Optional[Any] =self.model_tester.get_config()
__magic_name__ : Dict =MaskaFormerForUniversalSegmentation(__snake_case ).to(__snake_case )
__magic_name__ : Any =model(**__snake_case )
self.assertTrue(outputs.loss is not None )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ , __magic_name__ : int =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__snake_case , **__snake_case , output_hidden_states=__snake_case )
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ , __magic_name__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : List[Any] =model_class(__snake_case ).to(__snake_case )
__magic_name__ : Optional[int] =model(**__snake_case , output_attentions=__snake_case )
self.assertTrue(outputs.attentions is not None )
def A__ ( self :int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__magic_name__ : List[Any] =self.all_model_classes[1]
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
__magic_name__ : Dict =model_class(__snake_case )
model.to(__snake_case )
model.train()
__magic_name__ : Optional[Any] =model(__snake_case , mask_labels=__snake_case , class_labels=__snake_case ).loss
loss.backward()
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ : List[str] =self.all_model_classes[1]
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : List[Any] =self.model_tester.prepare_config_and_inputs()
__magic_name__ : Tuple =True
__magic_name__ : Optional[int] =True
__magic_name__ : int =model_class(__snake_case ).to(__snake_case )
model.train()
__magic_name__ : List[Any] =model(__snake_case , mask_labels=__snake_case , class_labels=__snake_case )
__magic_name__ : Optional[int] =outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__magic_name__ : Union[str, Any] =outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
__magic_name__ : Union[str, Any] =outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__magic_name__ : Optional[int] =outputs.attentions[0]
attentions.retain_grad()
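# retain_grad() is needed because these are intermediate (non-leaf) tensors;
# without it, their .grad attribute would be None after the backward pass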
outputs.loss.backward(retain_graph=__snake_case )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
UpperCAmelCase_ : Dict = 1e-4
def lowerCAmelCase_ ( ):
__magic_name__ : Dict =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class __A ( unittest.TestCase ):
@cached_property
def A__ ( self :int ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def A__ ( self :int ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Optional[Any] =MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(__snake_case )
__magic_name__ : int =self.default_image_processor
__magic_name__ : List[Any] =prepare_img()
__magic_name__ : Any =image_processor(__snake_case , return_tensors="""pt""" ).to(__snake_case )
__magic_name__ : Dict =inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__snake_case , (1, 3, 3_84, 3_84) )
with torch.no_grad():
__magic_name__ : List[str] =model(**__snake_case )
__magic_name__ : Any =torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(__snake_case )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __snake_case , atol=__snake_case ) )
__magic_name__ : Dict =torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(__snake_case )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __snake_case , atol=__snake_case ) )
__magic_name__ : Any =torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(__snake_case )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __snake_case , atol=__snake_case ) )
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Tuple =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__snake_case ).eval()
__magic_name__ : Optional[int] =self.default_image_processor
__magic_name__ : Tuple =prepare_img()
__magic_name__ : List[Any] =image_processor(__snake_case , return_tensors="""pt""" ).to(__snake_case )
__magic_name__ : Union[str, Any] =inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__snake_case , (1, 3, 3_84, 3_84) )
with torch.no_grad():
__magic_name__ : str =model(**__snake_case )
# masks_queries_logits
__magic_name__ : List[Any] =outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__magic_name__ : List[Any] =[
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
__magic_name__ : Dict =torch.tensor(__snake_case ).to(__snake_case )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __snake_case , atol=__snake_case ) )
# class_queries_logits
__magic_name__ : Any =outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
__magic_name__ : int =torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __snake_case , atol=__snake_case ) )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__snake_case ).eval()
__magic_name__ : Any =self.default_image_processor
__magic_name__ : Union[str, Any] =image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="""pt""" , )
__magic_name__ : str =inputs["""pixel_values"""].to(__snake_case )
__magic_name__ : Tuple =[el.to(__snake_case ) for el in inputs["""mask_labels"""]]
__magic_name__ : Union[str, Any] =[el.to(__snake_case ) for el in inputs["""class_labels"""]]
with torch.no_grad():
__magic_name__ : Dict =model(**__snake_case )
self.assertTrue(outputs.loss is not None )
| 21 | 1 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class __A ( unittest.TestCase ):
def __init__( self :str , __snake_case :str , __snake_case :Tuple=13 , __snake_case :List[str]=7 , __snake_case :List[str]=True , __snake_case :Dict=True , __snake_case :str=True , __snake_case :Optional[int]=True , __snake_case :Union[str, Any]=99 , __snake_case :List[str]=32 , __snake_case :Tuple=5 , __snake_case :Optional[int]=4 , __snake_case :Any=37 , __snake_case :Any="gelu" , __snake_case :Dict=0.1 , __snake_case :Union[str, Any]=0.1 , __snake_case :Optional[Any]=5_12 , __snake_case :int=16 , __snake_case :List[Any]=2 , __snake_case :str=0.02 , __snake_case :Dict=4 , ):
'''simple docstring'''
__magic_name__ : int =parent
__magic_name__ : Dict =batch_size
__magic_name__ : List[str] =seq_length
__magic_name__ : Optional[int] =is_training
__magic_name__ : Any =use_attention_mask
__magic_name__ : List[str] =use_token_type_ids
__magic_name__ : Any =use_labels
__magic_name__ : List[Any] =vocab_size
__magic_name__ : Optional[Any] =hidden_size
__magic_name__ : Tuple =num_hidden_layers
__magic_name__ : List[str] =num_attention_heads
__magic_name__ : int =intermediate_size
__magic_name__ : Optional[int] =hidden_act
__magic_name__ : str =hidden_dropout_prob
__magic_name__ : int =attention_probs_dropout_prob
__magic_name__ : str =max_position_embeddings
__magic_name__ : List[str] =type_vocab_size
__magic_name__ : Tuple =type_sequence_label_size
__magic_name__ : List[str] =initializer_range
__magic_name__ : Any =num_choices
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : Optional[int] =None
if self.use_attention_mask:
__magic_name__ : str =random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ : Union[str, Any] =None
if self.use_token_type_ids:
__magic_name__ : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ : Any =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : Dict =self.prepare_config_and_inputs()
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Tuple =config_and_inputs
__magic_name__ : Union[str, Any] ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class __A ( UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = True
UpperCamelCase = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : Tuple =FlaxRoFormerModelTester(self )
@slow
def A__ ( self :List[str] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__magic_name__ : Optional[Any] =model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=__snake_case )
__magic_name__ : List[str] =model(np.ones((1, 1) ) )
self.assertIsNotNone(__snake_case )
@require_flax
class __A ( unittest.TestCase ):
@slow
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
__magic_name__ : Tuple =jnp.array([[0, 1, 2, 3, 4, 5]] )
__magic_name__ : str =model(__snake_case )[0]
__magic_name__ : int =5_00_00
__magic_name__ : Dict =(1, 6, vocab_size)
self.assertEqual(output.shape , __snake_case )
__magic_name__ : List[Any] =jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , __snake_case , atol=1E-4 ) )
| 21 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __A ( UpperCamelCase__ ):
UpperCamelCase = """segformer"""
def __init__( self :List[str] , __snake_case :str=3 , __snake_case :Optional[Any]=4 , __snake_case :List[Any]=[2, 2, 2, 2] , __snake_case :Dict=[8, 4, 2, 1] , __snake_case :Optional[int]=[32, 64, 1_60, 2_56] , __snake_case :Union[str, Any]=[7, 3, 3, 3] , __snake_case :Optional[Any]=[4, 2, 2, 2] , __snake_case :Tuple=[1, 2, 5, 8] , __snake_case :List[Any]=[4, 4, 4, 4] , __snake_case :Optional[Any]="gelu" , __snake_case :Tuple=0.0 , __snake_case :Dict=0.0 , __snake_case :Optional[int]=0.1 , __snake_case :Optional[int]=0.02 , __snake_case :Tuple=0.1 , __snake_case :Union[str, Any]=1E-6 , __snake_case :int=2_56 , __snake_case :Optional[int]=2_55 , **__snake_case :Dict , ):
'''simple docstring'''
super().__init__(**__snake_case )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , __snake_case , )
__magic_name__ : Dict =num_channels
__magic_name__ : str =num_encoder_blocks
__magic_name__ : List[Any] =depths
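# explanatory note: sr_ratios are the sequence (spatial) reduction ratios used by the
# efficient self-attention in each encoder stage, per the SegFormer paper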
__magic_name__ : Optional[Any] =sr_ratios
__magic_name__ : List[str] =hidden_sizes
__magic_name__ : List[str] =patch_sizes
__magic_name__ : Any =strides
__magic_name__ : Optional[Any] =mlp_ratios
__magic_name__ : str =num_attention_heads
__magic_name__ : int =hidden_act
__magic_name__ : List[Any] =hidden_dropout_prob
__magic_name__ : Optional[Any] =attention_probs_dropout_prob
__magic_name__ : Optional[Any] =classifier_dropout_prob
__magic_name__ : List[str] =initializer_range
__magic_name__ : List[str] =drop_path_rate
__magic_name__ : List[Any] =layer_norm_eps
__magic_name__ : List[str] =decoder_hidden_size
__magic_name__ : Union[str, Any] =kwargs.get("""reshape_last_stage""" , __snake_case )
__magic_name__ : Dict =semantic_loss_ignore_index
class __A ( UpperCamelCase__ ):
UpperCamelCase = version.parse("""1.11""" )
@property
def A__ ( self :List[str] ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def A__ ( self :Any ):
'''simple docstring'''
return 1E-4
@property
def A__ ( self :int ):
'''simple docstring'''
return 12
| 21 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : List[Any] = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class __A ( UpperCamelCase__ ):
UpperCamelCase = """roformer"""
def __init__( self :str , __snake_case :Tuple=5_00_00 , __snake_case :Optional[Any]=None , __snake_case :Union[str, Any]=7_68 , __snake_case :List[Any]=12 , __snake_case :int=12 , __snake_case :str=30_72 , __snake_case :Union[str, Any]="gelu" , __snake_case :List[str]=0.1 , __snake_case :List[str]=0.1 , __snake_case :Union[str, Any]=15_36 , __snake_case :str=2 , __snake_case :Any=0.02 , __snake_case :Tuple=1E-12 , __snake_case :Dict=0 , __snake_case :Optional[int]=False , __snake_case :Optional[int]=True , **__snake_case :Optional[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=__snake_case , **__snake_case )
__magic_name__ : Dict =vocab_size
__magic_name__ : int =hidden_size if embedding_size is None else embedding_size
__magic_name__ : str =hidden_size
__magic_name__ : Any =num_hidden_layers
__magic_name__ : int =num_attention_heads
__magic_name__ : List[Any] =hidden_act
__magic_name__ : Dict =intermediate_size
__magic_name__ : Any =hidden_dropout_prob
__magic_name__ : List[str] =attention_probs_dropout_prob
__magic_name__ : List[Any] =max_position_embeddings
__magic_name__ : Optional[int] =type_vocab_size
__magic_name__ : int =initializer_range
__magic_name__ : int =layer_norm_eps
__magic_name__ : Optional[Any] =rotary_value
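# explanatory note: rotary_value controls whether rotary position embeddings are also
# applied to the value projections, a RoFormer-specific option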
__magic_name__ : Optional[int] =use_cache
class __A ( UpperCamelCase__ ):
@property
def A__ ( self :List[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
__magic_name__ : Optional[int] ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__magic_name__ : str ={0: """batch""", 1: """sequence"""}
__magic_name__ : List[Any] ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 21 |
import heapq
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : list[list] =[]
# for each node and its adjacency list, push them onto the queue together with the node's rank
# using the heapq module, the queue behaves like a priority queue
# heapq implements a min-priority queue, so -1 * len(v) is pushed to get max-priority behaviour
for key, value in graph.items():
# O(log(n))
heapq.heappush(lowerCamelCase , [-1 * len(lowerCamelCase ), (key, value)] )
# chosen_vertices = set of chosen vertices
__magic_name__ : Tuple =set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
__magic_name__ : Tuple =heapq.heappop(lowerCamelCase )[1][0]
chosen_vertices.add(lowerCamelCase )
# Remove all arcs adjacent to argmax
for elem in queue:
# if the vertex has no adjacent nodes left, skip it
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
__magic_name__ : Tuple =elem[1][1].index(lowerCamelCase )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(lowerCamelCase )
return chosen_vertices
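# A minimal illustration (not part of the algorithm above) of the max-heap-via-negation
# idiom used in the function: heapq only provides a min-heap, so pushing -1 * rank
# makes the highest-rank node pop first.
#
# import heapq
# demo = []
# heapq.heappush(demo, [-3, "a"])
# heapq.heappush(demo, [-1, "b"])
# assert heapq.heappop(demo)[1] == "a"  # "a" has the larger (negated) rank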
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : Optional[int] = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 21 | 1 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
UpperCAmelCase_ : List[str] = "src/transformers"
UpperCAmelCase_ : List[str] = "docs/source/en"
UpperCAmelCase_ : str = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text between `start_prompt` and `end_prompt`, and the lines around it."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
UpperCAmelCase_ : Any = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
UpperCAmelCase_ : Any = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
UpperCAmelCase_ : Tuple = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
UpperCAmelCase_ : Optional[int] = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camelcased `identifier` into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Generate an up-to-date model table from the content of the auto modules."""
    # Dictionary of model names to their configs.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """Check that the model table in index.md is consistent with the state of the lib and maybe `overwrite`."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
| 21 |
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : Union[str, Any] =sum(a_i[j] for j in range(lowerCamelCase , len(lowerCamelCase ) ) )
__magic_name__ : Any =sum(a_i[j] * base[j] for j in range(min(len(lowerCamelCase ) , lowerCamelCase ) ) )
__magic_name__ , __magic_name__ : Tuple =0, 0
__magic_name__ : Optional[Any] =n - i
__magic_name__ : Union[str, Any] =memo.get(lowerCamelCase )
if sub_memo is not None:
__magic_name__ : int =sub_memo.get(lowerCamelCase )
if jumps is not None and len(lowerCamelCase ) > 0:
# find and make the largest jump without going over
__magic_name__ : Dict =-1
for _k in range(len(lowerCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
__magic_name__ : Optional[Any] =_k
break
if max_jump >= 0:
__magic_name__ , __magic_name__ , __magic_name__ : Optional[int] =jumps[max_jump]
# since the difference between jumps is cached, add c
__magic_name__ : Tuple =diff + c
for j in range(min(lowerCamelCase , len(lowerCamelCase ) ) ):
__magic_name__ , __magic_name__ : Tuple =divmod(lowerCamelCase , 10 )
if new_c > 0:
add(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__magic_name__ : str =[]
else:
__magic_name__ : List[str] ={c: []}
__magic_name__ : List[str] =sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
__magic_name__ , __magic_name__ : Union[str, Any] =next_term(lowerCamelCase , k - 1 , i + dn , lowerCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
__magic_name__ , __magic_name__ : Optional[int] =compute(lowerCamelCase , lowerCamelCase , i + dn , lowerCamelCase )
diff += _diff
dn += terms_jumped
__magic_name__ : Tuple =sub_memo[c]
# keep jumps sorted by # of terms skipped
__magic_name__ : List[Any] =0
while j < len(lowerCamelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(lowerCamelCase , (diff, dn, k) )
return (diff, dn)
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if i >= n:
return 0, i
if k > len(lowerCamelCase ):
a_i.extend([0 for _ in range(k - len(lowerCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
__magic_name__ : Tuple =i
__magic_name__ , __magic_name__ , __magic_name__ : Tuple =0, 0, 0
for j in range(len(lowerCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
__magic_name__ : Optional[Any] =ds_c + ds_b
diff += addend
__magic_name__ : str =0
for j in range(lowerCamelCase ):
__magic_name__ : int =a_i[j] + addend
__magic_name__ , __magic_name__ : Any =divmod(lowerCamelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return diff, i - start_i
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for j in range(lowerCamelCase , len(lowerCamelCase ) ):
__magic_name__ : Tuple =digits[j] + addend
if s >= 10:
__magic_name__ , __magic_name__ : int =divmod(lowerCamelCase , 10 )
__magic_name__ : int =addend // 10 + quotient
else:
__magic_name__ : Dict =s
__magic_name__ : Any =addend // 10
if addend == 0:
break
while addend > 0:
__magic_name__ , __magic_name__ : Union[str, Any] =divmod(lowerCamelCase , 10 )
digits.append(lowerCamelCase )
def lowerCAmelCase_ ( lowerCamelCase = 10**15 ):
__magic_name__ : List[str] =[1]
__magic_name__ : str =1
__magic_name__ : str =0
while True:
__magic_name__ , __magic_name__ : List[str] =next_term(lowerCamelCase , 20 , i + dn , lowerCamelCase )
dn += terms_jumped
if dn == n - i:
break
__magic_name__ : int =0
for j in range(len(lowerCamelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F"""{solution() = }""")
| 21 | 1 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class BzaFileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class LzaFileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__
        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
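
# Illustrative usage through fsspec URL chaining, following the protocol comment
# above (the local path is made up):
#
#     import fsspec
#     with fsspec.open("gzip://file.txt::/tmp/file.txt.gz", "rb") as f:
#         data = f.read()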
| 21 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists in the handler."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds the KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
| 21 | 1 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """Releases memory by setting each passed object to None and emptying the device cache."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """Checks whether `exception` is one of the known out-of-memory errors."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """
    Decorator that retries `function` with a halved batch size whenever an out-of-memory
    error is raised. `function` must take `batch_size` as its first argument.
    """
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
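
# Minimal usage sketch of the decorator above; the training function and the
# simulated OOM error are made up for illustration.
#
#     @find_executable_batch_size(starting_batch_size=64)
#     def train(batch_size):
#         if batch_size > 16:
#             raise RuntimeError("CUDA out of memory.")  # recognised by should_reduce_batch_size
#         return batch_size
#
#     train()  # tries 64, then 32, then succeeds with 16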
| 21 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def lowerCAmelCase_ ( lowerCamelCase ):
def choose_first(lowerCamelCase , lowerCamelCase=False ):
assert isinstance(lowerCamelCase , lowerCamelCase )
if len(lowerCamelCase ) == 1:
__magic_name__ : List[str] =answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
__magic_name__ : Tuple ={k: [a[k]] for k in a}
if len(a["""start_token"""] ) > 0:
break
return a
__magic_name__ : str ={"""id""": example["""id"""]}
__magic_name__ : List[Any] =example["""annotations"""]
__magic_name__ : List[str] =annotation["""yes_no_answer"""]
if 0 in yes_no_answer or 1 in yes_no_answer:
__magic_name__ : Optional[int] =["""yes"""] if 1 in yes_no_answer else ["""no"""]
__magic_name__ : List[str] =[]
__magic_name__ : Dict =[]
__magic_name__ : str =["""<cls>"""]
else:
__magic_name__ : Tuple =["""short"""]
__magic_name__ : Optional[int] =choose_first(annotation["""short_answers"""] )
if len(out["""start_token"""] ) == 0:
# answer will be long if short is not available
__magic_name__ : Tuple =["""long"""]
__magic_name__ : Tuple =choose_first(annotation["""long_answer"""] , is_long_answer=lowerCamelCase )
__magic_name__ : List[Any] =[]
answer.update(lowerCamelCase )
# disregard some samples
if len(answer["""start_token"""] ) > 1 or answer["start_token"] == answer["end_token"]:
__magic_name__ : Any =True
else:
__magic_name__ : List[str] =False
__magic_name__ : int =["""start_token""", """end_token""", """start_byte""", """end_byte""", """text"""]
if not all(isinstance(answer[k] , lowerCamelCase ) for k in cols ):
raise ValueError("""Issue in ID""" , example["""id"""] )
return answer
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=False ):
__magic_name__ : Optional[int] =_get_single_answer(lowerCamelCase )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
__magic_name__ : Any =example["""document"""]["""tokens"""]
__magic_name__ : str =[]
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
return {
"context": " ".join(lowerCamelCase ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
__magic_name__ : Dict =["""start_token""", """end_token"""]
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
__magic_name__ : Tuple =example["""document"""]["""tokens"""]
__magic_name__ : Optional[int] =answer["""start_token"""]
__magic_name__ : List[Any] =answer["""end_token"""]
__magic_name__ : Optional[Any] =[]
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
__magic_name__ : Optional[int] =""" """.join(context[start_token:end_token] )
# checking above code
if assertion:
__magic_name__ : List[str] =doc["""is_html"""][answer["""start_token"""] : answer["""end_token"""]]
__magic_name__ : str =doc["""token"""][answer["""start_token"""] : answer["""end_token"""]]
__magic_name__ : Dict =""" """.join([old[i] for i in range(len(lowerCamelCase ) ) if not is_html[i]] )
if new != old:
print("""ID:""" , example["""id"""] )
print("""New:""" , lowerCamelCase , end="""\n""" )
print("""Old:""" , lowerCamelCase , end="""\n\n""" )
return {
"context": " ".join(lowerCamelCase ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=2048 , lowerCamelCase=4096 , lowerCamelCase=True ):
# overlap will be of doc_stride - q_len
__magic_name__ : Any =get_context_and_ans(lowerCamelCase , assertion=lowerCamelCase )
__magic_name__ : Union[str, Any] =out["""answer"""]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
__magic_name__ : List[Any] =tokenizer(example["""question"""]["""text"""] , out["""context"""] ).input_ids
__magic_name__ : Dict =input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
__magic_name__ : List[str] =[]
__magic_name__ : int =[]
__magic_name__ : List[str] =input_ids[:q_len]
__magic_name__ : Dict =range(lowerCamelCase , len(lowerCamelCase ) , max_length - doc_stride )
for i in doc_start_indices:
__magic_name__ : List[Any] =i + max_length - q_len
__magic_name__ : Tuple =input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer["""category"""][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(lowerCamelCase ),
"end_token": [-100] * len(lowerCamelCase ),
"category": category,
},
}
__magic_name__ : int =out["""context"""].split()
__magic_name__ : Any =splitted_context[answer["""end_token"""]]
__magic_name__ : str =len(
tokenizer(
""" """.join(splitted_context[: answer["""start_token"""]] ) , add_special_tokens=lowerCamelCase , ).input_ids )
__magic_name__ : Optional[int] =len(
tokenizer(""" """.join(splitted_context[: answer["""end_token"""]] ) , add_special_tokens=lowerCamelCase ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
__magic_name__ : Union[str, Any] =len(tokenizer(lowerCamelCase , add_special_tokens=lowerCamelCase ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
__magic_name__ : str =input_ids[answer["""start_token"""] : answer["""end_token"""] + 1] # right & left are inclusive
__magic_name__ : Dict =answer["""start_token"""]
__magic_name__ : int =answer["""end_token"""]
if assertion:
__magic_name__ : Any =tokenizer.decode(lowerCamelCase )
if answer["span"] != new:
print("""ISSUE IN TOKENIZATION""" )
print("""OLD:""" , answer["""span"""] )
print("""NEW:""" , lowerCamelCase , end="""\n\n""" )
if len(lowerCamelCase ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
__magic_name__ : Any =input_ids[:q_len]
__magic_name__ : Union[str, Any] =range(lowerCamelCase , len(lowerCamelCase ) , max_length - doc_stride )
__magic_name__ : Any =[]
__magic_name__ : List[str] =[]
__magic_name__ : List[str] =[]
__magic_name__ : str =[] # null, yes, no, long, short
for i in doc_start_indices:
__magic_name__ : List[Any] =i + max_length - q_len
__magic_name__ : Dict =input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
__magic_name__ : List[Any] =start_token - i + q_len
__magic_name__ : Optional[Any] =end_token - i + q_len
answers_category.append(answer["""category"""][0] ) # ["short"] -> "short"
else:
__magic_name__ : Optional[Any] =-100
__magic_name__ : Optional[Any] =-100
answers_category.append("""null""" )
__magic_name__ : Optional[int] =inputs[-1][start_token : end_token + 1]
answers_start_token.append(lowerCamelCase )
answers_end_token.append(lowerCamelCase )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print("""ISSUE in strided for ID:""" , example["""id"""] )
print("""New:""" , tokenizer.decode(lowerCamelCase ) )
print("""Old:""" , tokenizer.decode(lowerCamelCase ) , end="""\n\n""" )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
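
# The striding above in miniature (numbers are illustrative): question tokens
# are prepended to every window, windows start every (max_length - doc_stride)
# tokens, and no window exceeds max_length.
#
#     q_len, max_length, doc_stride = 5, 16, 12
#     input_ids = list(range(40))
#     starts = range(q_len, len(input_ids), max_length - doc_stride)
#     windows = [input_ids[:q_len] + input_ids[i : i + max_length - q_len] for i in starts]
#     assert all(len(w) <= max_length for w in windows)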
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # skip samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # randomly drop ~60% of the "null" samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
| 21 | 1 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : int = logging.get_logger(__name__)
def get_mobilenet_va_config(model_name):
    config = MobileNetVaConfig(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k) + 1: v for k, v in idalabel.items()}
    idalabel[0] = "background"
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}

    return config
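
# Quick worked example of the name-parsing regex above:
#
#     >>> m = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", "mobilenet_v1_0.75_192")
#     >>> float(m[1]), int(m[2])
#     (0.75, 192)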
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_va_config(model_name)

    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 21 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class __A ( UpperCamelCase__ ):
UpperCamelCase = """xlm-roberta-xl"""
    def __init__( self , vocab_size=250_880 , hidden_size=2_560 , num_hidden_layers=36 , num_attention_heads=32 , intermediate_size=10_240 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , initializer_range=0.02 , layer_norm_eps=1e-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __A ( UpperCamelCase__ ):
@property
def A__ ( self :Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
__magic_name__ : str ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__magic_name__ : Optional[Any] ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 21 | 1 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class __A ( tf.keras.layers.Layer ):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPTaTokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 21 |
from pathlib import Path
import fire
from tqdm import tqdm
def lowerCAmelCase_ ( lowerCamelCase="ro" , lowerCamelCase="en" , lowerCamelCase="wmt16" , lowerCamelCase=None ):
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("""run pip install datasets""" )
__magic_name__ : Dict =F"{src_lang}-{tgt_lang}"
print(F"Converting {dataset}-{pair}" )
__magic_name__ : Dict =datasets.load_dataset(lowerCamelCase , lowerCamelCase )
if save_dir is None:
__magic_name__ : Optional[int] =F"{dataset}-{pair}"
__magic_name__ : int =Path(lowerCamelCase )
save_dir.mkdir(exist_ok=lowerCamelCase )
for split in ds.keys():
print(F"Splitting {split} with {ds[split].num_rows} records" )
# to save to val.source, val.target like summary datasets
__magic_name__ : Dict ="""val""" if split == """validation""" else split
__magic_name__ : List[Any] =save_dir.joinpath(F"{fn}.source" )
__magic_name__ : Optional[int] =save_dir.joinpath(F"{fn}.target" )
__magic_name__ : Optional[Any] =src_path.open("""w+""" )
__magic_name__ : List[Any] =tgt_path.open("""w+""" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__magic_name__ : str =x["""translation"""]
src_fp.write(ex[src_lang] + """\n""" )
tgt_fp.write(ex[tgt_lang] + """\n""" )
print(F"Saved {dataset} dataset to {save_dir}" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 21 | 1 |
def solution(n: int = 1000) -> int:
    """Returns the sum of all the multiples of 3 or 5 below n."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 21 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n_digits: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n_digits):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
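
# The four "curious" two-digit fractions this search finds are 16/64, 19/95,
# 26/65 and 49/98; quick checks of the predicate (illustrative):
assert is_digit_cancelling(49, 98)  # 49/98 == 4/8 after cancelling the 9s
assert not is_digit_cancelling(30, 50)  # no shared cancellable digit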
if __name__ == "__main__":
print(solution())
| 21 | 1 |
from functools import reduce
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in the 1000-digit number n with the
    greatest product and return that product."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
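
# Sanity check (illustrative): a run of thirteen 2s padded with zeros makes the
# best window exactly 2**13.
assert solution("2" * 13 + "0" * 987) == 2**13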
if __name__ == "__main__":
print(F"""{solution() = }""")
| 21 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the circle's area to the square's area is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The known value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 1 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def lowerCAmelCase_ ( lowerCamelCase ):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
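
# rename_key moves one tensor to a new name in place; for example:
#
#     sd = {"visual_encoder.cls_token": 0}
#     rename_key(sd, "visual_encoder.cls_token", "vision_model.embeddings.class_embedding")
#     assert sd == {"vision_model.embeddings.class_embedding": 0}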
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name, eos_token_id=None):
    image_size = 364 if "coco" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = BlipaConfig(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=None , lowerCamelCase=False ):
__magic_name__ : List[Any] =(
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
__magic_name__ : List[str] =tokenizer("""\n""" , add_special_tokens=lowerCamelCase ).input_ids[0]
__magic_name__ , __magic_name__ : Dict =get_blipa_config(lowerCamelCase , eos_token_id=lowerCamelCase )
__magic_name__ : Union[str, Any] =BlipaForConditionalGeneration(lowerCamelCase ).eval()
__magic_name__ : List[Any] ={
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
__magic_name__ , __magic_name__ : int =model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
__magic_name__ : List[str] ="""cuda""" if torch.cuda.is_available() else """cpu"""
__magic_name__ , __magic_name__ , __magic_name__ : str =load_model_and_preprocess(
name=lowerCamelCase , model_type=lowerCamelCase , is_eval=lowerCamelCase , device=lowerCamelCase )
original_model.eval()
print("""Done!""" )
# update state dict keys
__magic_name__ : Tuple =original_model.state_dict()
__magic_name__ : str =create_rename_keys(lowerCamelCase )
for src, dest in rename_keys:
rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__magic_name__ : Optional[Any] =state_dict.pop(lowerCamelCase )
if key.startswith("""Qformer.bert""" ):
__magic_name__ : Tuple =key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
__magic_name__ : str =key.replace("""self""" , """attention""" )
if "opt_proj" in key:
__magic_name__ : Optional[int] =key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
__magic_name__ : Tuple =key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
__magic_name__ : Union[str, Any] =key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
__magic_name__ : List[str] =key.replace("""t5""" , """language""" )
__magic_name__ : List[str] =val
# read in qv biases
read_in_q_v_bias(lowerCamelCase , lowerCamelCase )
__magic_name__ , __magic_name__ : int =hf_model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
assert len(lowerCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__magic_name__ : int =load_demo_image()
__magic_name__ : Tuple =vis_processors["""eval"""](lowerCamelCase ).unsqueeze(0 ).to(lowerCamelCase )
__magic_name__ : List[Any] =tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(lowerCamelCase )
# create processor
__magic_name__ : Union[str, Any] =BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=lowerCamelCase , image_std=lowerCamelCase )
__magic_name__ : Optional[int] =BlipaProcessor(image_processor=lowerCamelCase , tokenizer=lowerCamelCase )
__magic_name__ : Optional[Any] =processor(images=lowerCamelCase , return_tensors="""pt""" ).pixel_values.to(lowerCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(lowerCamelCase , lowerCamelCase )
original_model.to(lowerCamelCase )
hf_model.to(lowerCamelCase )
with torch.no_grad():
if "opt" in model_name:
__magic_name__ : List[Any] =original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
__magic_name__ : List[str] =hf_model(lowerCamelCase , lowerCamelCase ).logits
else:
__magic_name__ : Union[str, Any] =original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
__magic_name__ : Any =input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
__magic_name__ : List[str] =hf_model(lowerCamelCase , lowerCamelCase , labels=lowerCamelCase ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__magic_name__ : Tuple =torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__magic_name__ : Union[str, Any] =torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=lowerCamelCase )
else:
# cast to same type
__magic_name__ : Any =logits.dtype
assert torch.allclose(original_logits.to(lowerCamelCase ) , lowerCamelCase , atol=1E-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
__magic_name__ : Dict =""""""
__magic_name__ : List[Any] =tokenizer(lowerCamelCase , return_tensors="""pt""" ).input_ids.to(lowerCamelCase )
__magic_name__ : List[str] =original_model.generate({"""image""": original_pixel_values} )
__magic_name__ : int =hf_model.generate(
lowerCamelCase , lowerCamelCase , do_sample=lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , lowerCamelCase )
__magic_name__ : List[Any] =input_ids.shape[1]
__magic_name__ : int =processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=lowerCamelCase )
__magic_name__ : int =[text.strip() for text in output_text]
print("""HF generation:""" , lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowerCamelCase )
hf_model.save_pretrained(lowerCamelCase )
if push_to_hub:
processor.push_to_hub(F"nielsr/{model_name}" )
hf_model.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 21 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class __A ( tf.keras.layers.Layer ):
def __init__( self :Optional[int] , __snake_case :Dict[str, int] , __snake_case :List[str] , __snake_case :int = None , __snake_case :int = None ):
'''simple docstring'''
super().__init__()
__magic_name__ : Optional[int] =pad_token_id
__magic_name__ : List[Any] =max_length
__magic_name__ : Dict =vocab
__magic_name__ : int =merges
__magic_name__ : Optional[int] =BytePairTokenizer(__snake_case , __snake_case , sequence_length=__snake_case )
@classmethod
def A__ ( cls :List[Any] , __snake_case :GPTaTokenizer , *__snake_case :int , **__snake_case :Any ):
'''simple docstring'''
__magic_name__ : List[Any] =[""" """.join(__snake_case ) for m in tokenizer.bpe_ranks.keys()]
__magic_name__ : str =tokenizer.get_vocab()
return cls(__snake_case , __snake_case , *__snake_case , **__snake_case )
@classmethod
def A__ ( cls :Dict , __snake_case :Union[str, os.PathLike] , *__snake_case :Union[str, Any] , **__snake_case :int ):
'''simple docstring'''
__magic_name__ : Dict =GPTaTokenizer.from_pretrained(__snake_case , *__snake_case , **__snake_case )
return cls.from_tokenizer(__snake_case , *__snake_case , **__snake_case )
@classmethod
def A__ ( cls :Optional[Any] , __snake_case :List[Any] ):
'''simple docstring'''
return cls(**__snake_case )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def A__ ( self :List[Any] , __snake_case :Dict , __snake_case :int = None ):
'''simple docstring'''
__magic_name__ : Optional[Any] =self.tf_tokenizer(__snake_case )
__magic_name__ : Tuple =tf.ones_like(__snake_case )
if self.pad_token_id is not None:
# pad the tokens up to max length
__magic_name__ : Tuple =max_length if max_length is not None else self.max_length
if max_length is not None:
__magic_name__ , __magic_name__ : Tuple =pad_model_inputs(
__snake_case , max_seq_length=__snake_case , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 21 | 1 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __A ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
@register_to_config
def __init__( self :Optional[Any] , __snake_case :int , __snake_case :int , __snake_case :int , __snake_case :float , __snake_case :int , __snake_case :int , __snake_case :int , __snake_case :int , __snake_case :str , __snake_case :bool = False , ):
'''simple docstring'''
super().__init__()
__magic_name__ : List[str] =nn.Embedding(__snake_case , __snake_case )
__magic_name__ : int =nn.Embedding(__snake_case , __snake_case )
__magic_name__ : Tuple =False
__magic_name__ : List[str] =nn.Dropout(p=__snake_case )
__magic_name__ : Union[str, Any] =TaConfig(
vocab_size=__snake_case , d_model=__snake_case , num_heads=__snake_case , d_kv=__snake_case , d_ff=__snake_case , dropout_rate=__snake_case , feed_forward_proj=__snake_case , is_decoder=__snake_case , is_encoder_decoder=__snake_case , )
__magic_name__ : List[Any] =nn.ModuleList()
for lyr_num in range(__snake_case ):
__magic_name__ : Any =TaBlock(__snake_case )
self.encoders.append(__snake_case )
__magic_name__ : Union[str, Any] =TaLayerNorm(__snake_case )
__magic_name__ : str =nn.Dropout(p=__snake_case )
def A__ ( self :List[Any] , __snake_case :Union[str, Any] , __snake_case :List[str] ):
'''simple docstring'''
__magic_name__ : Dict =self.token_embedder(__snake_case )
__magic_name__ : List[Any] =encoder_input_tokens.shape[1]
__magic_name__ : Any =torch.arange(__snake_case , device=encoder_input_tokens.device )
x += self.position_encoding(__snake_case )
__magic_name__ : Optional[int] =self.dropout_pre(__snake_case )
        # invert the attention mask before extending it
__magic_name__ : Optional[int] =encoder_input_tokens.size()
__magic_name__ : Optional[int] =self.get_extended_attention_mask(__snake_case , __snake_case )
for lyr in self.encoders:
__magic_name__ : Union[str, Any] =lyr(__snake_case , __snake_case )[0]
__magic_name__ : int =self.layer_norm(__snake_case )
return self.dropout_post(__snake_case ), encoder_inputs_mask
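# Illustrative instantiation (a sketch; upstream this module is diffusers'
# SpectrogramNotesEncoder, and the keyword names below are assumptions
# inferred from how each argument is used in __init__):
#
#   encoder = SpectrogramNotesEncoder(
#       max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
#       num_layers=12, num_heads=12, d_kv=64, d_ff=2048,
#       feed_forward_proj="gated-gelu",
#   )
#   tokens = torch.randint(0, 1536, (1, 2048))
#   mask = torch.ones(1, 2048, dtype=torch.bool)
#   hidden_states, out_mask = encoder(tokens, mask)   # hidden_states: (1, 2048, 768)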
| 21 |
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Original GELU, computed exactly with the Gaussian error function."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Smoother GELU approximation using tanh (https://arxiv.org/abs/1606.08415)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU with outputs clipped to [-10, 10]."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split the input in two halves and gate one with the other."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new

ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
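# Minimal sanity check of the mapping above (illustrative):
if __name__ == "__main__":
    import numpy as np

    act = get_tf_activation("gelu_new")
    x = tf.constant([-1.0, 0.0, 1.0])
    # GELU is ~0 for large negative inputs, exactly 0 at 0, and ~x for large positive inputs.
    print(np.round(act(x).numpy(), 3))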
| 21 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __A ( unittest.TestCase ):
def __init__( self :Tuple , __snake_case :str , __snake_case :List[Any]=7 , __snake_case :Optional[int]=3 , __snake_case :List[str]=18 , __snake_case :Optional[int]=30 , __snake_case :str=4_00 , __snake_case :Dict=True , __snake_case :Optional[Any]=None , __snake_case :List[Any]=True , ):
'''simple docstring'''
__magic_name__ : Tuple =size if size is not None else {"""height""": 18, """width""": 18}
__magic_name__ : List[Any] =parent
__magic_name__ : Any =batch_size
__magic_name__ : str =num_channels
__magic_name__ : List[str] =image_size
__magic_name__ : str =min_resolution
__magic_name__ : Union[str, Any] =max_resolution
__magic_name__ : Tuple =do_resize
__magic_name__ : Optional[Any] =size
__magic_name__ : Dict =apply_ocr
def A__ ( self :Any ):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __A ( UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def A__ ( self :Optional[int] ):
'''simple docstring'''
__magic_name__ : Dict =LayoutLMvaImageProcessingTester(self )
@property
def A__ ( self :Dict ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Tuple =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , """do_resize""" ) )
self.assertTrue(hasattr(__snake_case , """size""" ) )
self.assertTrue(hasattr(__snake_case , """apply_ocr""" ) )
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ : Dict =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
__magic_name__ : Tuple =self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def A__ ( self :str ):
'''simple docstring'''
pass
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : List[str] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ : Union[str, Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
__magic_name__ : Optional[Any] =image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , __snake_case )
self.assertIsInstance(encoding.boxes , __snake_case )
# Test batched
__magic_name__ : Optional[int] =image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : Optional[int] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ : Union[str, Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
__magic_name__ : str =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__magic_name__ : Dict =image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ : Dict =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
__magic_name__ : Optional[Any] =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__magic_name__ : Union[str, Any] =image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ : int =LayoutLMvaImageProcessor()
from datasets import load_dataset
__magic_name__ : Union[str, Any] =load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
__magic_name__ : Dict =Image.open(ds[0]["""file"""] ).convert("""RGB""" )
__magic_name__ : str =image_processing(__snake_case , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__magic_name__ : Tuple =[["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
__magic_name__ : Any =[[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 
6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __snake_case )
self.assertListEqual(encoding.boxes , __snake_case )
# with apply_OCR = False
__magic_name__ : Dict =LayoutLMvaImageProcessor(apply_ocr=__snake_case )
__magic_name__ : Union[str, Any] =image_processing(__snake_case , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 21 |
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum possible sum amongst all non-empty subsequences."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # Keep the best sum so far, extend it with num, or restart at num.
        ans = max(ans, ans + num, num)
    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
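# Worked examples (illustrative): a subsequence may skip elements, so the best
# sum keeps every positive entry, or falls back to the largest single element
# when all entries are negative:
#
#   >>> max_subsequence_sum([2, -1, 3, -10, 5])
#   10
#   >>> max_subsequence_sum([-3, -5, -2])
#   -2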
| 21 | 1 |
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    # Standard BFS over the residual graph; returns True while the sink t is
    # still reachable from the source s, recording the path in `parent`.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record the original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    # Edges whose capacity dropped from positive to zero are saturated: the cut.
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
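# Expected result for the capacity matrix above (illustrative run): the
# saturated edges [(1, 3), (4, 3), (4, 5)] form the minimum cut separating
# source 0 from sink 5, matching the classic maximum flow of 23 for this graph.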
| 21 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __A :
UpperCamelCase = 42
UpperCamelCase = None
# Automatically constructed
UpperCamelCase = "dict"
UpperCamelCase = None
UpperCamelCase = field(default="""Translation""" , init=UpperCamelCase__ , repr=UpperCamelCase__ )
def __call__( self :Union[str, Any] ):
'''simple docstring'''
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def A__ ( self :List[Any] ):
'''simple docstring'''
from .features import Value
return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class __A :
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
# Automatically constructed
UpperCamelCase = "dict"
UpperCamelCase = None
UpperCamelCase = field(default="""TranslationVariableLanguages""" , init=UpperCamelCase__ , repr=UpperCamelCase__ )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[Any] =sorted(set(self.languages ) ) if self.languages else None
__magic_name__ : Optional[int] =len(self.languages ) if self.languages else None
def __call__( self :List[str] ):
'''simple docstring'''
return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
def A__ ( self :str , __snake_case :str ):
'''simple docstring'''
__magic_name__ : Optional[int] =set(self.languages )
if self.languages and set(__snake_case ) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(__snake_case ) - lang_set ) )}) are not in valid set ({', '.join(__snake_case )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__magic_name__ : Any =[]
for lang, text in translation_dict.items():
if isinstance(__snake_case , __snake_case ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__magic_name__ , __magic_name__ : List[str] =zip(*sorted(__snake_case ) )
return {"language": languages, "translation": translations}
def A__ ( self :List[Any] ):
'''simple docstring'''
from .features import Sequence, Value
return {
"language": Sequence(Value("""string""" ) ),
"translation": Sequence(Value("""string""" ) ),
}
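# Illustrative behaviour of the variable-language feature above (a sketch):
# encoding {"en": "the cat", "fr": ["le chat", "la chatte"]} flattens the
# one-to-many translation into parallel lists sorted by (language, text):
#
#   {"language": ("en", "fr", "fr"),
#    "translation": ("the cat", "la chatte", "le chat")}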
| 21 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Union[str, Any] = {
"shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class __A ( UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase = """nat"""
UpperCamelCase = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self :List[Any] , __snake_case :Tuple=4 , __snake_case :int=3 , __snake_case :Union[str, Any]=64 , __snake_case :Optional[Any]=[3, 4, 6, 5] , __snake_case :Tuple=[2, 4, 8, 16] , __snake_case :Optional[int]=7 , __snake_case :Optional[int]=3.0 , __snake_case :int=True , __snake_case :Dict=0.0 , __snake_case :Tuple=0.0 , __snake_case :List[Any]=0.1 , __snake_case :Optional[int]="gelu" , __snake_case :Optional[Any]=0.02 , __snake_case :Optional[int]=1E-5 , __snake_case :List[str]=0.0 , __snake_case :List[str]=None , __snake_case :Optional[int]=None , **__snake_case :List[Any] , ):
'''simple docstring'''
super().__init__(**__snake_case )
__magic_name__ : Any =patch_size
__magic_name__ : Optional[int] =num_channels
__magic_name__ : Tuple =embed_dim
__magic_name__ : List[Any] =depths
__magic_name__ : Union[str, Any] =len(__snake_case )
__magic_name__ : List[Any] =num_heads
__magic_name__ : int =kernel_size
__magic_name__ : Tuple =mlp_ratio
__magic_name__ : Tuple =qkv_bias
__magic_name__ : Dict =hidden_dropout_prob
__magic_name__ : Dict =attention_probs_dropout_prob
__magic_name__ : Union[str, Any] =drop_path_rate
__magic_name__ : Union[str, Any] =hidden_act
__magic_name__ : Optional[int] =layer_norm_eps
__magic_name__ : Union[str, Any] =initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__magic_name__ : List[str] =int(embed_dim * 2 ** (len(__snake_case ) - 1) )
__magic_name__ : List[str] =layer_scale_init_value
__magic_name__ : List[Any] =["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__snake_case ) + 1 )]
__magic_name__ , __magic_name__ : Dict =get_aligned_output_features_output_indices(
out_features=__snake_case , out_indices=__snake_case , stage_names=self.stage_names )
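# Illustrative construction (a sketch; in transformers this configuration is
# NatConfig, used both standalone and as a backbone config):
#
#   config = NatConfig(depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16],
#                      out_features=["stage2", "stage4"])
#   config.hidden_size   # embed_dim * 2 ** (num_stages - 1) = 64 * 8 = 512
#   config.stage_names   # ["stem", "stage1", "stage2", "stage3", "stage4"]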
| 21 |
from sklearn.metrics import matthews_corrcoef
import datasets
UpperCAmelCase_ : Dict = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
UpperCAmelCase_ : Any = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
UpperCAmelCase_ : Dict = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def A__ ( self :List[str] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
def A__ ( self :Tuple , __snake_case :str , __snake_case :Tuple , __snake_case :List[str]=None ):
'''simple docstring'''
return {
"matthews_correlation": float(matthews_corrcoef(__snake_case , __snake_case , sample_weight=__snake_case ) ),
}
| 21 | 1 |
from __future__ import annotations
import math
import random
from typing import Any
class __A :
def __init__( self :Dict ):
'''simple docstring'''
__magic_name__ : list[Any] =[]
__magic_name__ : int =0
__magic_name__ : int =0
def A__ ( self :int ):
'''simple docstring'''
return self.head == self.tail
def A__ ( self :Optional[Any] , __snake_case :Any ):
'''simple docstring'''
self.data.append(__snake_case )
__magic_name__ : Optional[int] =self.tail + 1
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =self.data[self.head]
__magic_name__ : Any =self.head + 1
return ret
def A__ ( self :Dict ):
'''simple docstring'''
return self.tail - self.head
def A__ ( self :Any ):
'''simple docstring'''
print(self.data )
print("""**************""" )
print(self.data[self.head : self.tail] )
class __A :
def __init__( self :Tuple , __snake_case :Any ):
'''simple docstring'''
__magic_name__ : Optional[Any] =data
__magic_name__ : MyNode | None =None
__magic_name__ : MyNode | None =None
__magic_name__ : int =1
def A__ ( self :Any ):
'''simple docstring'''
return self.data
def A__ ( self :int ):
'''simple docstring'''
return self.left
def A__ ( self :int ):
'''simple docstring'''
return self.right
def A__ ( self :str ):
'''simple docstring'''
return self.height
def A__ ( self :Tuple , __snake_case :Any ):
'''simple docstring'''
__magic_name__ : int =data
def A__ ( self :Dict , __snake_case :MyNode | None ):
'''simple docstring'''
__magic_name__ : Any =node
def A__ ( self :str , __snake_case :MyNode | None ):
'''simple docstring'''
__magic_name__ : Tuple =node
def A__ ( self :Optional[Any] , __snake_case :int ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =height
def lowerCAmelCase_ ( lowerCamelCase ):
if node is None:
return 0
return node.get_height()
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
if a > b:
return a
return b
def lowerCAmelCase_ ( lowerCamelCase ):
print("""left rotation node:""" , node.get_data() )
__magic_name__ : Any =node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(lowerCamelCase )
__magic_name__ : Optional[int] =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowerCamelCase )
__magic_name__ : List[str] =my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowerCamelCase )
return ret
def lowerCAmelCase_ ( lowerCamelCase ):
print("""right rotation node:""" , node.get_data() )
__magic_name__ : List[str] =node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(lowerCamelCase )
__magic_name__ : Tuple =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowerCamelCase )
__magic_name__ : str =my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowerCamelCase )
return ret
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Dict =node.get_left()
assert left_child is not None
node.set_left(left_rotation(lowerCamelCase ) )
return right_rotation(lowerCamelCase )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : int =node.get_right()
assert right_child is not None
node.set_right(right_rotation(lowerCamelCase ) )
return left_rotation(lowerCamelCase )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
if node is None:
return MyNode(lowerCamelCase )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , lowerCamelCase ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
__magic_name__ : int =node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
__magic_name__ : Tuple =right_rotation(lowerCamelCase )
else:
__magic_name__ : int =lr_rotation(lowerCamelCase )
else:
node.set_right(insert_node(node.get_right() , lowerCamelCase ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
__magic_name__ : List[Any] =node.get_right()
assert right_child is not None
if data < right_child.get_data():
__magic_name__ : Tuple =rl_rotation(lowerCamelCase )
else:
__magic_name__ : Optional[Any] =left_rotation(lowerCamelCase )
__magic_name__ : Optional[int] =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowerCamelCase )
return node
def lowerCAmelCase_ ( lowerCamelCase ):
while True:
__magic_name__ : str =root.get_right()
if right_child is None:
break
__magic_name__ : List[Any] =right_child
return root.get_data()
def lowerCAmelCase_ ( lowerCamelCase ):
while True:
__magic_name__ : List[Any] =root.get_left()
if left_child is None:
break
__magic_name__ : Optional[int] =left_child
return root.get_data()
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
__magic_name__ : str =root.get_left()
__magic_name__ : Union[str, Any] =root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
__magic_name__ : Union[str, Any] =get_left_most(lowerCamelCase )
root.set_data(lowerCamelCase )
root.set_right(del_node(lowerCamelCase , lowerCamelCase ) )
elif left_child is not None:
__magic_name__ : Any =left_child
elif right_child is not None:
__magic_name__ : Tuple =right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("""No such data""" )
return root
else:
root.set_left(del_node(lowerCamelCase , lowerCamelCase ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(lowerCamelCase , lowerCamelCase ) )
if get_height(lowerCamelCase ) - get_height(lowerCamelCase ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
__magic_name__ : Optional[Any] =left_rotation(lowerCamelCase )
else:
__magic_name__ : Union[str, Any] =rl_rotation(lowerCamelCase )
elif get_height(lowerCamelCase ) - get_height(lowerCamelCase ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
__magic_name__ : Any =right_rotation(lowerCamelCase )
else:
__magic_name__ : Optional[int] =lr_rotation(lowerCamelCase )
__magic_name__ : Dict =my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(lowerCamelCase )
return root
class __A :
def __init__( self :List[Any] ):
'''simple docstring'''
__magic_name__ : MyNode | None =None
def A__ ( self :str ):
'''simple docstring'''
return get_height(self.root )
def A__ ( self :Tuple , __snake_case :Any ):
'''simple docstring'''
print("""insert:""" + str(__snake_case ) )
__magic_name__ : Optional[int] =insert_node(self.root , __snake_case )
def A__ ( self :Tuple , __snake_case :Any ):
'''simple docstring'''
print("""delete:""" + str(__snake_case ) )
if self.root is None:
print("""Tree is empty!""" )
return
__magic_name__ : str =del_node(self.root , __snake_case )
    def __str__( self :List[str] , ): # a level-order traversal; gives a more intuitive look at the tree
'''simple docstring'''
__magic_name__ : str =""""""
__magic_name__ : List[Any] =MyQueue()
q.push(self.root )
__magic_name__ : str =self.get_height()
if layer == 0:
return output
__magic_name__ : List[Any] =0
while not q.is_empty():
__magic_name__ : Any =q.pop()
__magic_name__ : str =""" """ * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(__snake_case )
q.push(__snake_case )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
__magic_name__ : Optional[Any] =cnt + 1
for i in range(1_00 ):
if cnt == math.pow(2 , __snake_case ) - 1:
__magic_name__ : str =layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def lowerCAmelCase_ ( ):
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
UpperCAmelCase_ : int = AVLtree()
UpperCAmelCase_ : Dict = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
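# Illustrative property of the structure above: every insert/delete rebalances
# through the four rotations, so after the 10-key demo the printed height stays
# logarithmic: at most 4 levels here, since an AVL tree with 5 or more levels
# (under this implementation's 1-based height) needs at least 12 nodes.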
| 21 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=False ):
__magic_name__ : Optional[int] =OmegaConf.load(lowerCamelCase )
if display:
print(yaml.dump(OmegaConf.to_container(lowerCamelCase ) ) )
return config
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None ):
if conf_path is None:
__magic_name__ : List[str] ="""./model_checkpoints/vqgan_only.yaml"""
__magic_name__ : Dict =load_config(lowerCamelCase , display=lowerCamelCase )
__magic_name__ : Tuple =VQModel(**config.model.params )
if ckpt_path is None:
__magic_name__ : Optional[Any] ="""./model_checkpoints/vqgan_only.pt"""
__magic_name__ : Tuple =torch.load(lowerCamelCase , map_location=lowerCamelCase )
if ".ckpt" in ckpt_path:
__magic_name__ : Any =sd["""state_dict"""]
model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
model.to(lowerCamelCase )
del sd
return model
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
__magic_name__ , __magic_name__ , __magic_name__ : Optional[Any] =model.encode(lowerCamelCase )
print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
__magic_name__ : List[Any] =model.decode(lowerCamelCase )
return xrec
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=False ):
__magic_name__ , __magic_name__ : Optional[int] =string.rsplit(""".""" , 1 )
if reload:
__magic_name__ : Optional[int] =importlib.import_module(lowerCamelCase )
importlib.reload(lowerCamelCase )
return getattr(importlib.import_module(lowerCamelCase , package=lowerCamelCase ) , cls )
def lowerCAmelCase_ ( lowerCamelCase ):
if "target" not in config:
raise KeyError("""Expected key `target` to instantiate.""" )
return get_obj_from_str(config["""target"""] )(**config.get("""params""" , {} ) )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=True , lowerCamelCase=True ):
__magic_name__ : str =instantiate_from_config(lowerCamelCase )
if sd is not None:
model.load_state_dict(lowerCamelCase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
# load the specified checkpoint
if ckpt:
__magic_name__ : str =torch.load(lowerCamelCase , map_location="""cpu""" )
__magic_name__ : Any =pl_sd["""global_step"""]
print(F"loaded model from global step {global_step}." )
else:
__magic_name__ : List[Any] ={"""state_dict""": None}
__magic_name__ : Optional[Any] =None
__magic_name__ : Tuple =load_model_from_config(config.model , pl_sd["""state_dict"""] , gpu=lowerCamelCase , eval_mode=lowerCamelCase )["""model"""]
return model, global_step
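# Illustrative round trip (a sketch; `load_vqgan` and `reconstruct_with_vqgan`
# are hypothetical names for the loader and encode/decode helpers defined
# above, whose original identifiers are elided, and the default checkpoint
# paths must exist locally):
#
#   model = load_vqgan(device=torch.device("cuda"))
#   x = torch.randn(1, 3, 256, 256, device="cuda")
#   x_rec = reconstruct_with_vqgan(x, model)   # encode to latents, then decode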
| 21 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class __A ( UpperCamelCase__ ):
UpperCamelCase = """blenderbot-small"""
UpperCamelCase = ["""past_key_values"""]
UpperCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self :Any , __snake_case :Optional[int]=5_02_65 , __snake_case :Any=5_12 , __snake_case :Tuple=8 , __snake_case :Optional[Any]=20_48 , __snake_case :List[Any]=16 , __snake_case :Any=8 , __snake_case :Union[str, Any]=20_48 , __snake_case :Any=16 , __snake_case :List[str]=0.0 , __snake_case :Dict=0.0 , __snake_case :str=True , __snake_case :Optional[int]=True , __snake_case :Optional[int]="gelu" , __snake_case :Dict=5_12 , __snake_case :Optional[Any]=0.1 , __snake_case :Tuple=0.0 , __snake_case :Optional[Any]=0.0 , __snake_case :Optional[int]=0.02 , __snake_case :Optional[int]=1 , __snake_case :str=False , __snake_case :List[Any]=0 , __snake_case :int=1 , __snake_case :List[Any]=2 , __snake_case :Optional[Any]=2 , **__snake_case :str , ):
'''simple docstring'''
__magic_name__ : int =vocab_size
__magic_name__ : Optional[Any] =max_position_embeddings
__magic_name__ : Optional[Any] =d_model
__magic_name__ : str =encoder_ffn_dim
__magic_name__ : Tuple =encoder_layers
__magic_name__ : List[str] =encoder_attention_heads
__magic_name__ : Union[str, Any] =decoder_ffn_dim
__magic_name__ : int =decoder_layers
__magic_name__ : Tuple =decoder_attention_heads
__magic_name__ : Tuple =dropout
__magic_name__ : List[str] =attention_dropout
__magic_name__ : int =activation_dropout
__magic_name__ : Union[str, Any] =activation_function
__magic_name__ : Any =init_std
__magic_name__ : Any =encoder_layerdrop
__magic_name__ : Optional[int] =decoder_layerdrop
__magic_name__ : int =use_cache
__magic_name__ : Dict =encoder_layers
__magic_name__ : Tuple =scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , decoder_start_token_id=__snake_case , forced_eos_token_id=__snake_case , **__snake_case , )
class __A ( UpperCamelCase__ ):
@property
def A__ ( self :Any ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__magic_name__ : List[Any] =OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
__magic_name__ : Tuple ={0: """batch"""}
__magic_name__ : Tuple ={0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
__magic_name__ : Optional[Any] ={0: """batch""", 1: """decoder_sequence"""}
__magic_name__ : Union[str, Any] ={0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(__snake_case , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__magic_name__ : Optional[Any] =OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
__magic_name__ , __magic_name__ : List[Any] =self.num_layers
for i in range(__snake_case ):
__magic_name__ : Optional[int] ={0: """batch""", 2: """past_sequence + sequence"""}
__magic_name__ : str ={0: """batch""", 2: """past_sequence + sequence"""}
else:
__magic_name__ : Dict =OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def A__ ( self :Any ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__magic_name__ : List[Any] =super().outputs
else:
__magic_name__ : Optional[Any] =super(__snake_case , self ).outputs
if self.use_past:
__magic_name__ , __magic_name__ : int =self.num_layers
for i in range(__snake_case ):
__magic_name__ : Optional[int] ={0: """batch""", 2: """past_sequence + sequence"""}
__magic_name__ : str ={0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def A__ ( self :List[Any] , __snake_case :PreTrainedTokenizer , __snake_case :int = -1 , __snake_case :int = -1 , __snake_case :bool = False , __snake_case :Optional[TensorType] = None , ):
'''simple docstring'''
__magic_name__ : str =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# Generate decoder inputs
__magic_name__ : List[str] =seq_length if not self.use_past else 1
__magic_name__ : Any =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
__magic_name__ : Any ={f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__magic_name__ : List[str] =dict(**__snake_case , **__snake_case )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__magic_name__ , __magic_name__ : int =common_inputs["""input_ids"""].shape
__magic_name__ : Any =common_inputs["""decoder_input_ids"""].shape[1]
__magic_name__ , __magic_name__ : Optional[int] =self.num_attention_heads
__magic_name__ : int =(
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__magic_name__ : List[Any] =decoder_seq_length + 3
__magic_name__ : List[Any] =(
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__magic_name__ : Any =torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(__snake_case , __snake_case )] , dim=1 )
__magic_name__ : Optional[Any] =[]
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__magic_name__ , __magic_name__ : Dict =self.num_layers
__magic_name__ : List[Any] =min(__snake_case , __snake_case )
__magic_name__ : List[Any] =max(__snake_case , __snake_case ) - min_num_layers
__magic_name__ : Optional[Any] ="""encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(__snake_case ):
common_inputs["past_key_values"].append(
(
torch.zeros(__snake_case ),
torch.zeros(__snake_case ),
torch.zeros(__snake_case ),
torch.zeros(__snake_case ),
) )
# TODO: test this.
__magic_name__ : int =encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(__snake_case , __snake_case ):
common_inputs["past_key_values"].append((torch.zeros(__snake_case ), torch.zeros(__snake_case )) )
return common_inputs
def A__ ( self :List[str] , __snake_case :PreTrainedTokenizer , __snake_case :int = -1 , __snake_case :int = -1 , __snake_case :bool = False , __snake_case :Optional[TensorType] = None , ):
'''simple docstring'''
__magic_name__ : Optional[Any] =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__magic_name__ , __magic_name__ : int =common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__magic_name__ : int =seqlen + 2
__magic_name__ , __magic_name__ : Tuple =self.num_layers
__magic_name__ , __magic_name__ : List[str] =self.num_attention_heads
__magic_name__ : List[Any] =(
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__magic_name__ : Union[str, Any] =common_inputs["""attention_mask"""].dtype
__magic_name__ : int =torch.cat(
[common_inputs["""attention_mask"""], torch.ones(__snake_case , __snake_case , dtype=__snake_case )] , dim=1 )
__magic_name__ : Dict =[
(torch.zeros(__snake_case ), torch.zeros(__snake_case )) for _ in range(__snake_case )
]
return common_inputs
def A__ ( self :Any , __snake_case :PreTrainedTokenizer , __snake_case :int = -1 , __snake_case :int = -1 , __snake_case :bool = False , __snake_case :Optional[TensorType] = None , ):
'''simple docstring'''
__magic_name__ : Tuple =compute_effective_axis_dimension(
__snake_case , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__magic_name__ : Any =tokenizer.num_special_tokens_to_add(__snake_case )
__magic_name__ : Optional[Any] =compute_effective_axis_dimension(
__snake_case , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__snake_case )
# Generate dummy inputs according to compute batch and sequence
__magic_name__ : List[str] =[""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
__magic_name__ : int =dict(tokenizer(__snake_case , return_tensors=__snake_case ) )
return common_inputs
def A__ ( self :Optional[Any] , __snake_case :PreTrainedTokenizer , __snake_case :int = -1 , __snake_case :int = -1 , __snake_case :bool = False , __snake_case :Optional[TensorType] = None , ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__magic_name__ : Any =self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case )
elif self.task == "causal-lm":
__magic_name__ : List[Any] =self._generate_dummy_inputs_for_causal_lm(
__snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case )
else:
__magic_name__ : int =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case )
return common_inputs
def A__ ( self :List[str] , __snake_case :Any , __snake_case :Dict , __snake_case :Any , __snake_case :Any ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__magic_name__ : Union[str, Any] =super()._flatten_past_key_values_(__snake_case , __snake_case , __snake_case , __snake_case )
else:
__magic_name__ : Optional[Any] =super(__snake_case , self )._flatten_past_key_values_(
__snake_case , __snake_case , __snake_case , __snake_case )
| 21 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __A ( unittest.TestCase ):
def A__ ( self :Tuple ):
'''simple docstring'''
debug_launcher(test_script.main )
def A__ ( self :Dict ):
'''simple docstring'''
debug_launcher(test_ops.main )
| 21 | 1 |
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be non-negative")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    # Spatial curvature closes the density budget: Omega_k = 1 - sum(Omega_i)
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    # H(z) = H0 * E(z), with E(z)^2 given above
    hubble = hubble_constant * e_2 ** (1 / 2)
    return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
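# At redshift 0 the density terms sum to E(0)^2 = 1 by construction, so the
# demo above should print the Hubble constant itself, approximately 68.3.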
| 21 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            raise ValueError(f"The elements inside the sequence must contain only {colors} values")
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
| 21 | 1 |
UpperCAmelCase_ : int = range(2, 20 + 1)
UpperCAmelCase_ : Tuple = [10**k for k in range(ks[-1] + 1)]
UpperCAmelCase_ : dict[int, dict[int, list[list[int]]]] = {}
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : Union[str, Any] =sum(a_i[j] for j in range(lowerCamelCase , len(lowerCamelCase ) ) )
__magic_name__ : Any =sum(a_i[j] * base[j] for j in range(min(len(lowerCamelCase ) , lowerCamelCase ) ) )
__magic_name__ , __magic_name__ : Tuple =0, 0
__magic_name__ : Optional[Any] =n - i
__magic_name__ : Union[str, Any] =memo.get(lowerCamelCase )
if sub_memo is not None:
__magic_name__ : int =sub_memo.get(lowerCamelCase )
if jumps is not None and len(lowerCamelCase ) > 0:
# find and make the largest jump without going over
__magic_name__ : Dict =-1
for _k in range(len(lowerCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
__magic_name__ : Optional[Any] =_k
break
if max_jump >= 0:
__magic_name__ , __magic_name__ , __magic_name__ : Optional[int] =jumps[max_jump]
# since the difference between jumps is cached, add c
__magic_name__ : Tuple =diff + c
for j in range(min(lowerCamelCase , len(lowerCamelCase ) ) ):
__magic_name__ , __magic_name__ : Tuple =divmod(lowerCamelCase , 10 )
if new_c > 0:
add(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__magic_name__ : str =[]
else:
__magic_name__ : List[str] ={c: []}
__magic_name__ : List[str] =sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
__magic_name__ , __magic_name__ : Union[str, Any] =next_term(lowerCamelCase , k - 1 , i + dn , lowerCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
__magic_name__ , __magic_name__ : Optional[int] =compute(lowerCamelCase , lowerCamelCase , i + dn , lowerCamelCase )
diff += _diff
dn += terms_jumped
__magic_name__ : Tuple =sub_memo[c]
# keep jumps sorted by # of terms skipped
__magic_name__ : List[Any] =0
while j < len(lowerCamelCase ):
if jumps[j][1] > dn:
break
j += 1
    # cache the jump for this value of digitsum(b) and c
sub_memo[c].insert(lowerCamelCase , (diff, dn, k) )
return (diff, dn)
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if i >= n:
return 0, i
if k > len(lowerCamelCase ):
a_i.extend([0 for _ in range(k - len(lowerCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
__magic_name__ : Tuple =i
__magic_name__ , __magic_name__ , __magic_name__ : Tuple =0, 0, 0
for j in range(len(lowerCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
__magic_name__ : Optional[Any] =ds_c + ds_b
diff += addend
__magic_name__ : str =0
for j in range(lowerCamelCase ):
__magic_name__ : int =a_i[j] + addend
__magic_name__ , __magic_name__ : Any =divmod(lowerCamelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return diff, i - start_i
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for j in range(lowerCamelCase , len(lowerCamelCase ) ):
__magic_name__ : Tuple =digits[j] + addend
if s >= 10:
__magic_name__ , __magic_name__ : int =divmod(lowerCamelCase , 10 )
__magic_name__ : int =addend // 10 + quotient
else:
__magic_name__ : Dict =s
__magic_name__ : Any =addend // 10
if addend == 0:
break
while addend > 0:
__magic_name__ , __magic_name__ : Union[str, Any] =divmod(lowerCamelCase , 10 )
digits.append(lowerCamelCase )
def lowerCAmelCase_ ( lowerCamelCase = 10**15 ):
__magic_name__ : List[str] =[1]
__magic_name__ : str =1
__magic_name__ : str =0
while True:
__magic_name__ , __magic_name__ : List[str] =next_term(lowerCamelCase , 20 , i + dn , lowerCamelCase )
dn += terms_jumped
if dn == n - i:
break
__magic_name__ : int =0
for j in range(len(lowerCamelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F"""{solution() = }""")
| 21 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class __A ( UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase = 1
@register_to_config
def __init__( self :Any , __snake_case :Tuple=20_00 , __snake_case :Optional[Any]=0.1 , __snake_case :Any=20 , __snake_case :Optional[int]=1E-3 ):
'''simple docstring'''
__magic_name__ : Dict =None
__magic_name__ : List[str] =None
__magic_name__ : str =None
def A__ ( self :Dict , __snake_case :Optional[int] , __snake_case :Union[str, torch.device] = None ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =torch.linspace(1 , self.config.sampling_eps , __snake_case , device=__snake_case )
def A__ ( self :List[str] , __snake_case :List[str] , __snake_case :int , __snake_case :int , __snake_case :List[str]=None ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
__magic_name__ : int =(
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
__magic_name__ : Optional[int] =torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
__magic_name__ : str =std.flatten()
while len(std.shape ) < len(score.shape ):
__magic_name__ : List[str] =std.unsqueeze(-1 )
__magic_name__ : Union[str, Any] =-score / std
# compute
__magic_name__ : Tuple =-1.0 / len(self.timesteps )
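        # reverse-time Euler-Maruyama step: dt is negative because sampling integrates from t = 1 down to sampling_eps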
__magic_name__ : int =self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
__magic_name__ : Dict =beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
__magic_name__ : Any =beta_t.unsqueeze(-1 )
__magic_name__ : Dict =-0.5 * beta_t * x
__magic_name__ : Optional[int] =torch.sqrt(__snake_case )
__magic_name__ : int =drift - diffusion**2 * score
__magic_name__ : List[str] =x + drift * dt
# add noise
__magic_name__ : Optional[int] =randn_tensor(x.shape , layout=x.layout , generator=__snake_case , device=x.device , dtype=x.dtype )
__magic_name__ : Optional[Any] =x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self :List[Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
| 21 | 1 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCAmelCase_ : int = logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
def __init__( self :Tuple , *__snake_case :str , **__snake_case :List[Any] ):
'''simple docstring'''
warnings.warn(
"""The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use MobileViTImageProcessor instead.""" , __snake_case , )
super().__init__(*__snake_case , **__snake_case )
| 21 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
def __init__( self :List[str] , __snake_case :int , __snake_case :int , __snake_case :float , **__snake_case :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[Any] =feature_size
__magic_name__ : Union[str, Any] =sampling_rate
__magic_name__ : List[Any] =padding_value
__magic_name__ : List[str] =kwargs.pop("""padding_side""" , """right""" )
__magic_name__ : Tuple =kwargs.pop("""return_attention_mask""" , __snake_case )
super().__init__(**__snake_case )
def A__ ( self :Any , __snake_case :Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , __snake_case :Union[bool, str, PaddingStrategy] = True , __snake_case :Optional[int] = None , __snake_case :bool = False , __snake_case :Optional[int] = None , __snake_case :Optional[bool] = None , __snake_case :Optional[Union[str, TensorType]] = None , ):
'''simple docstring'''
if isinstance(__snake_case , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__magic_name__ : Union[str, Any] ={
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
f" to this method that includes {self.model_input_names[0]}, but you provided"
f" {list(processed_features.keys() )}" )
__magic_name__ : int =processed_features[self.model_input_names[0]]
__magic_name__ : Union[str, Any] =(
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(__snake_case ) == 0:
if return_attention_mask:
__magic_name__ : List[str] =[]
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__magic_name__ : Optional[int] =required_input[0]
if isinstance(__snake_case , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases, so we grab the first non-empty element.
__magic_name__ : Optional[Any] =0
while len(required_input[index] ) == 0:
index += 1
if index < len(__snake_case ):
__magic_name__ : List[str] =required_input[index][0]
if return_tensors is None:
if is_tf_tensor(__snake_case ):
__magic_name__ : int ="""tf"""
elif is_torch_tensor(__snake_case ):
__magic_name__ : str ="""pt"""
elif isinstance(__snake_case , (int, float, list, tuple, np.ndarray) ):
__magic_name__ : List[Any] ="""np"""
else:
raise ValueError(
f"type of {first_element} unknown: {type(__snake_case )}. "
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__magic_name__ : List[str] =to_numpy(__snake_case )
else:
__magic_name__ : str =[to_numpy(__snake_case ) for v in value]
# Convert padding_strategy in PaddingStrategy
__magic_name__ : Dict =self._get_padding_strategies(padding=__snake_case , max_length=__snake_case )
__magic_name__ : Optional[Any] =processed_features[self.model_input_names[0]]
__magic_name__ : Dict =len(__snake_case )
if not all(len(__snake_case ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
__magic_name__ : Optional[int] =[]
for i in range(__snake_case ):
__magic_name__ : Any ={k: v[i] for k, v in processed_features.items()}
# truncation
__magic_name__ : List[str] =self._truncate(
__snake_case , max_length=__snake_case , pad_to_multiple_of=__snake_case , truncation=__snake_case , )
truncated_inputs.append(__snake_case )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__magic_name__ : Optional[int] =max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__magic_name__ : Tuple =PaddingStrategy.MAX_LENGTH
__magic_name__ : str ={}
for i in range(__snake_case ):
# padding
__magic_name__ : List[str] =self._pad(
truncated_inputs[i] , max_length=__snake_case , padding_strategy=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , )
for key, value in outputs.items():
if key not in batch_outputs:
__magic_name__ : Dict =[]
if value.dtype is np.dtype(np.floataa ):
__magic_name__ : Optional[int] =value.astype(np.floataa )
batch_outputs[key].append(__snake_case )
return BatchFeature(__snake_case , tensor_type=__snake_case )
def A__ ( self :Any , __snake_case :Union[Dict[str, np.ndarray], BatchFeature] , __snake_case :Optional[int] = None , __snake_case :PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __snake_case :Optional[int] = None , __snake_case :Optional[bool] = None , ):
'''simple docstring'''
__magic_name__ : Dict =processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__magic_name__ : Any =len(__snake_case )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
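            # round max_length up to the next multiple of pad_to_multiple_of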
__magic_name__ : Dict =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__magic_name__ : List[Any] =padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__snake_case ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__magic_name__ : int =np.ones(len(__snake_case ) , dtype=np.intaa )
if needs_to_be_padded:
__magic_name__ : List[Any] =max_length - len(__snake_case )
if self.padding_side == "right":
if return_attention_mask:
__magic_name__ : str =np.pad(
processed_features["""attention_mask"""] , (0, difference) )
__magic_name__ : Tuple =((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__magic_name__ : str =np.pad(
__snake_case , __snake_case , """constant""" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__magic_name__ : str =np.pad(
processed_features["""attention_mask"""] , (difference, 0) )
__magic_name__ : Optional[int] =((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__magic_name__ : List[Any] =np.pad(
__snake_case , __snake_case , """constant""" , constant_values=self.padding_value )
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return processed_features
def A__ ( self :Optional[Any] , __snake_case :Union[Dict[str, np.ndarray], BatchFeature] , __snake_case :Optional[int] = None , __snake_case :Optional[int] = None , __snake_case :Optional[bool] = None , ):
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
__magic_name__ : Union[str, Any] =processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__magic_name__ : List[str] =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__magic_name__ : Any =len(__snake_case ) > max_length
if needs_to_be_truncated:
__magic_name__ : List[Any] =processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__magic_name__ : List[str] =processed_features["""attention_mask"""][:max_length]
return processed_features
def A__ ( self :List[Any] , __snake_case :str=False , __snake_case :Optional[int]=None ):
'''simple docstring'''
if padding is not False:
if padding is True:
__magic_name__ : Union[str, Any] =PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(__snake_case , __snake_case ):
__magic_name__ : Optional[int] =PaddingStrategy(__snake_case )
elif isinstance(__snake_case , __snake_case ):
__magic_name__ : Any =padding
else:
__magic_name__ : Any =PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
| 21 | 1 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class __A ( UpperCamelCase__ ):
UpperCamelCase = """mvp"""
UpperCamelCase = ["""past_key_values"""]
UpperCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self :List[str] , __snake_case :List[Any]=5_02_67 , __snake_case :List[str]=10_24 , __snake_case :Optional[int]=12 , __snake_case :Union[str, Any]=40_96 , __snake_case :List[Any]=16 , __snake_case :Union[str, Any]=12 , __snake_case :int=40_96 , __snake_case :Optional[int]=16 , __snake_case :List[Any]=0.0 , __snake_case :Union[str, Any]=0.0 , __snake_case :Any="gelu" , __snake_case :int=10_24 , __snake_case :Tuple=0.1 , __snake_case :Optional[Any]=0.0 , __snake_case :Tuple=0.0 , __snake_case :Optional[int]=0.02 , __snake_case :Dict=0.0 , __snake_case :Union[str, Any]=False , __snake_case :List[str]=True , __snake_case :List[str]=1 , __snake_case :Optional[int]=0 , __snake_case :Optional[Any]=2 , __snake_case :int=True , __snake_case :List[str]=2 , __snake_case :Any=2 , __snake_case :Union[str, Any]=False , __snake_case :int=1_00 , __snake_case :Optional[Any]=8_00 , **__snake_case :Optional[int] , ):
'''simple docstring'''
__magic_name__ : Tuple =vocab_size
__magic_name__ : str =max_position_embeddings
__magic_name__ : Dict =d_model
__magic_name__ : Any =encoder_ffn_dim
__magic_name__ : str =encoder_layers
__magic_name__ : Tuple =encoder_attention_heads
__magic_name__ : int =decoder_ffn_dim
__magic_name__ : Dict =decoder_layers
__magic_name__ : Tuple =decoder_attention_heads
__magic_name__ : Dict =dropout
__magic_name__ : str =attention_dropout
__magic_name__ : Tuple =activation_dropout
__magic_name__ : List[Any] =activation_function
__magic_name__ : List[Any] =init_std
__magic_name__ : List[Any] =encoder_layerdrop
__magic_name__ : Optional[Any] =decoder_layerdrop
__magic_name__ : List[Any] =classifier_dropout
__magic_name__ : Dict =use_cache
__magic_name__ : Optional[int] =encoder_layers
__magic_name__ : Optional[Any] =scale_embedding # scale factor will be sqrt(d_model) if True
__magic_name__ : Any =use_prompt
__magic_name__ : Tuple =prompt_length
__magic_name__ : List[Any] =prompt_mid_dim
super().__init__(
pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , decoder_start_token_id=__snake_case , forced_eos_token_id=__snake_case , **__snake_case , )
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , __snake_case ):
__magic_name__ : Optional[int] =self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
"""The config can simply be saved and uploaded again to be fixed.""" )
| 21 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
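# test doubles: PreForwardHook adds 1 to the module's first input; PostForwardHook adds 1 to its output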
class __A ( nn.Module ):
def __init__( self :List[Any] ):
'''simple docstring'''
super().__init__()
__magic_name__ : Tuple =nn.Linear(3 , 4 )
__magic_name__ : Union[str, Any] =nn.BatchNormad(4 )
__magic_name__ : List[str] =nn.Linear(4 , 5 )
def A__ ( self :Dict , __snake_case :Tuple ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(__snake_case ) ) )
class __A ( UpperCamelCase__ ):
def A__ ( self :Any , __snake_case :Optional[Any] , *__snake_case :List[Any] , **__snake_case :Any ):
'''simple docstring'''
return (args[0] + 1,) + args[1:], kwargs
class __A ( UpperCamelCase__ ):
def A__ ( self :List[str] , __snake_case :Tuple , __snake_case :Union[str, Any] ):
'''simple docstring'''
return output + 1
class __A ( unittest.TestCase ):
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : int =ModelForTest()
__magic_name__ : Tuple =ModelHook()
add_hook_to_module(__snake_case , __snake_case )
self.assertEqual(test_model._hf_hook , __snake_case )
self.assertTrue(hasattr(__snake_case , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__snake_case )
self.assertFalse(hasattr(__snake_case , """_hf_hook""" ) )
self.assertFalse(hasattr(__snake_case , """_old_forward""" ) )
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ : int =ModelForTest()
__magic_name__ : List[str] =ModelHook()
add_hook_to_module(__snake_case , __snake_case )
add_hook_to_module(__snake_case , __snake_case , append=__snake_case )
self.assertEqual(isinstance(test_model._hf_hook , __snake_case ) , __snake_case )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__snake_case , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__snake_case )
self.assertFalse(hasattr(__snake_case , """_hf_hook""" ) )
self.assertFalse(hasattr(__snake_case , """_old_forward""" ) )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Any =ModelForTest()
__magic_name__ : Any =torch.randn(2 , 3 )
__magic_name__ : Any =test_model(x + 1 )
__magic_name__ : Optional[Any] =test_model(x + 2 )
__magic_name__ : int =PreForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : int =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-5 ) )
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
__magic_name__ : str =PreForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : List[str] =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__magic_name__ : Optional[Any] =SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Any =test_model(__snake_case )
assert torch.allclose(__snake_case , __snake_case , atol=1E-5 )
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Optional[Any] =ModelForTest()
__magic_name__ : Dict =torch.randn(2 , 3 )
__magic_name__ : Any =test_model(__snake_case )
__magic_name__ : Dict =PostForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Any =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , output + 1 , atol=1E-5 ) )
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
__magic_name__ : Union[str, Any] =PostForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Optional[int] =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__magic_name__ : Union[str, Any] =SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Union[str, Any] =test_model(__snake_case )
assert torch.allclose(__snake_case , output + 2 , atol=1E-5 )
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ : Tuple =ModelForTest()
__magic_name__ : int =torch.randn(2 , 3 )
__magic_name__ : Union[str, Any] =test_model(__snake_case )
__magic_name__ : Union[str, Any] =PostForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Dict =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__magic_name__ : Any =True
__magic_name__ : Any =test_model(__snake_case )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Optional[Any] =ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # This will move each submodule to a different device
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__magic_name__ : List[Any] =torch.randn(2 , 3 )
__magic_name__ : Optional[Any] =model(__snake_case )
self.assertEqual(output.device , torch.device(1 ) )
        # We can add a general hook that puts the output back on the same device as the input.
add_hook_to_module(__snake_case , AlignDevicesHook(io_same_device=__snake_case ) )
__magic_name__ : int =torch.randn(2 , 3 ).to(0 )
__magic_name__ : Optional[int] =model(__snake_case )
self.assertEqual(output.device , torch.device(0 ) )
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : int =ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # Offload each submodule's weights; parameters move to the meta device until forward time
__magic_name__ : int ={"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__magic_name__ : Optional[int] =torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __snake_case )
__magic_name__ : Union[str, Any] =torch.randn(2 , 3 )
__magic_name__ : Optional[int] =model(__snake_case )
self.assertEqual(output.device , __snake_case )
        # Removing the hooks loads the weights back into the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
__magic_name__ : Tuple ={
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__magic_name__ : Tuple =torch.randn(2 , 3 )
__magic_name__ : int =model(__snake_case )
self.assertEqual(output.device , __snake_case )
        # Removing the hooks loads the weights back into the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : Any =ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # Attach align-device hooks throughout the model, offloading the weights
__magic_name__ : str =0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__snake_case , execution_device=__snake_case , offload=__snake_case )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__magic_name__ : Optional[Any] =torch.device(__snake_case )
self.assertEqual(model.batchnorm.running_mean.device , __snake_case )
__magic_name__ : List[Any] =torch.randn(2 , 3 )
__magic_name__ : str =model(__snake_case )
self.assertEqual(output.device , __snake_case )
        # Removing the hooks loads the weights back into the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__snake_case , execution_device=__snake_case , offload=__snake_case , offload_buffers=__snake_case )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__magic_name__ : Optional[int] =torch.randn(2 , 3 )
__magic_name__ : Union[str, Any] =model(__snake_case )
self.assertEqual(output.device , __snake_case )
        # Removing the hooks loads the weights back into the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Dict =ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # Attach align-device hooks throughout the model, offloading weights from the provided state dict
__magic_name__ : List[str] =0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__snake_case , execution_device=__snake_case , offload=__snake_case , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__magic_name__ : Optional[Any] =torch.device(__snake_case )
self.assertEqual(model.batchnorm.running_mean.device , __snake_case )
__magic_name__ : int =torch.randn(2 , 3 )
__magic_name__ : Any =model(__snake_case )
self.assertEqual(output.device , __snake_case )
        # Removing the hooks loads the weights back into the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__snake_case , execution_device=__snake_case , offload=__snake_case , weights_map=model.state_dict() , offload_buffers=__snake_case , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__magic_name__ : List[Any] =torch.randn(2 , 3 )
__magic_name__ : str =model(__snake_case )
self.assertEqual(output.device , __snake_case )
        # Removing the hooks loads the weights back into the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
| 21 | 1 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
UpperCAmelCase_ : List[str] = logging.getLogger(__name__)
UpperCAmelCase_ : str = tf.data.AUTOTUNE
def lowerCAmelCase_ ( ):
__magic_name__ : Tuple =argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""" , type=lowerCamelCase , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , )
parser.add_argument(
"""--tokenizer""" , type=lowerCamelCase , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , )
parser.add_argument(
"""--per_replica_batch_size""" , type=lowerCamelCase , default=8 , help="""Batch size per TPU core.""" , )
parser.add_argument(
"""--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , )
parser.add_argument(
"""--tpu_name""" , type=lowerCamelCase , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , )
parser.add_argument(
"""--tpu_zone""" , type=lowerCamelCase , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , )
parser.add_argument(
"""--gcp_project""" , type=lowerCamelCase , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , )
parser.add_argument(
"""--train_dataset""" , type=lowerCamelCase , help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--shuffle_buffer_size""" , type=lowerCamelCase , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , )
parser.add_argument(
"""--eval_dataset""" , type=lowerCamelCase , help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowerCamelCase , default=1 , help="""Number of epochs to train for.""" , )
parser.add_argument(
"""--learning_rate""" , type=lowerCamelCase , default=1E-4 , help="""Learning rate to use for training.""" , )
parser.add_argument(
"""--weight_decay_rate""" , type=lowerCamelCase , default=1E-3 , help="""Weight decay rate to use for training.""" , )
parser.add_argument(
"""--max_length""" , type=lowerCamelCase , default=512 , help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""" , )
parser.add_argument(
"""--mlm_probability""" , type=lowerCamelCase , default=0.1_5 , help="""Fraction of tokens to mask during training.""" , )
parser.add_argument("""--output_dir""" , type=lowerCamelCase , required=lowerCamelCase , help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""" , type=lowerCamelCase , help="""Model ID to upload to on the Hugging Face Hub.""" )
__magic_name__ : List[Any] =parser.parse_args()
return args
def lowerCAmelCase_ ( lowerCamelCase ):
try:
if args.tpu_name:
__magic_name__ : List[str] =tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
__magic_name__ : List[str] =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"""Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
"""--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
tf.config.experimental_connect_to_cluster(lowerCamelCase )
tf.tpu.experimental.initialize_tpu_system(lowerCamelCase )
return tpu
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : List[Any] =0
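    # shard filenames end in "-<shard_index>-<num_samples>.tfrecord"; sum the per-shard sample counts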
for file in file_list:
__magic_name__ : str =file.split("""/""" )[-1]
__magic_name__ : Tuple =re.search(R"""-\d+-(\d+)\.tfrecord""" , lowerCamelCase ).group(1 )
__magic_name__ : int =int(lowerCamelCase )
num_samples += sample_count
return num_samples
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None ):
__magic_name__ : Optional[Any] =count_samples(lowerCamelCase )
__magic_name__ : Optional[int] =tf.data.Dataset.from_tensor_slices(lowerCamelCase )
if shuffle:
__magic_name__ : Union[str, Any] =dataset.shuffle(len(lowerCamelCase ) )
__magic_name__ : List[str] =tf.data.TFRecordDataset(lowerCamelCase , num_parallel_reads=lowerCamelCase )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
__magic_name__ : Union[str, Any] =dataset.apply(tf.data.experimental.assert_cardinality(lowerCamelCase ) )
__magic_name__ : Dict =dataset.map(lowerCamelCase , num_parallel_calls=lowerCamelCase )
if shuffle:
assert shuffle_buffer_size is not None
__magic_name__ : Tuple =dataset.shuffle(args.shuffle_buffer_size )
__magic_name__ : Optional[int] =dataset.batch(lowerCamelCase , drop_remainder=lowerCamelCase )
__magic_name__ : Any =dataset.map(lowerCamelCase , num_parallel_calls=lowerCamelCase )
__magic_name__ : int =dataset.prefetch(lowerCamelCase )
return dataset
def lowerCAmelCase_ ( lowerCamelCase ):
if not args.no_tpu:
__magic_name__ : Optional[int] =initialize_tpu(lowerCamelCase )
__magic_name__ : str =tf.distribute.TPUStrategy(lowerCamelCase )
else:
__magic_name__ : Optional[Any] =tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
__magic_name__ : List[str] =AutoTokenizer.from_pretrained(args.tokenizer )
__magic_name__ : Tuple =AutoConfig.from_pretrained(args.pretrained_model_config )
__magic_name__ : Union[str, Any] =tokenizer.vocab_size
__magic_name__ : str =tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) )
if not training_records:
raise ValueError(F"No .tfrecord files found in {args.train_dataset}." )
__magic_name__ : int =tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) )
if not eval_records:
raise ValueError(F"No .tfrecord files found in {args.eval_dataset}." )
__magic_name__ : Tuple =count_samples(lowerCamelCase )
__magic_name__ : Any =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
__magic_name__ : Any =steps_per_epoch * args.num_epochs
with strategy.scope():
__magic_name__ : Optional[Any] =TFAutoModelForMaskedLM.from_config(lowerCamelCase )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
__magic_name__ , __magic_name__ : Dict =create_optimizer(
num_train_steps=lowerCamelCase , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=lowerCamelCase , metrics=["""accuracy"""] )
def decode_fn(lowerCamelCase ):
__magic_name__ : Optional[int] ={
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(lowerCamelCase , lowerCamelCase )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
__magic_name__ : Optional[Any] =DataCollatorForLanguageModeling(
tokenizer=lowerCamelCase , mlm_probability=args.mlm_probability , mlm=lowerCamelCase , return_tensors="""tf""" )
def mask_with_collator(lowerCamelCase ):
# TF really needs an isin() function
__magic_name__ : Dict =(
~tf.cast(batch["""attention_mask"""] , tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
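        # never mask padding, CLS, or SEP positions when sampling MLM targets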
__magic_name__ , __magic_name__ : Union[str, Any] =data_collator.tf_mask_tokens(
batch["""input_ids"""] , vocab_size=len(lowerCamelCase ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=lowerCamelCase , )
return batch
__magic_name__ : Optional[Any] =args.per_replica_batch_size * strategy.num_replicas_in_sync
__magic_name__ : Dict =prepare_dataset(
lowerCamelCase , decode_fn=lowerCamelCase , mask_fn=lowerCamelCase , batch_size=lowerCamelCase , shuffle=lowerCamelCase , shuffle_buffer_size=args.shuffle_buffer_size , )
__magic_name__ : List[str] =prepare_dataset(
lowerCamelCase , decode_fn=lowerCamelCase , mask_fn=lowerCamelCase , batch_size=lowerCamelCase , shuffle=lowerCamelCase , )
__magic_name__ : Optional[int] =[]
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=lowerCamelCase ) )
model.fit(
lowerCamelCase , validation_data=lowerCamelCase , epochs=args.num_epochs , callbacks=lowerCamelCase , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = parse_args()
main(args)
| 21 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = KandinskyInpaintPipeline
UpperCamelCase = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
UpperCamelCase = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
UpperCamelCase = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCamelCase = False
@property
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
return 32
@property
def A__ ( self :Optional[Any] ):
'''simple docstring'''
return 32
@property
def A__ ( self :List[Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def A__ ( self :Dict ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def A__ ( self :List[Any] ):
'''simple docstring'''
return 1_00
@property
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : Dict =XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def A__ ( self :str ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ : str =MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
__magic_name__ : Tuple =MultilingualCLIP(__snake_case )
__magic_name__ : Optional[int] =text_encoder.eval()
return text_encoder
@property
def A__ ( self :Dict ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ : Optional[Any] ={
"""in_channels""": 9,
            # out_channels is double in_channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__magic_name__ : Union[str, Any] =UNetaDConditionModel(**__snake_case )
return model
@property
def A__ ( self :List[str] ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A__ ( self :Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ : Dict =VQModel(**self.dummy_movq_kwargs )
return model
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[str] =self.dummy_text_encoder
__magic_name__ : Optional[Any] =self.dummy_tokenizer
__magic_name__ : Optional[Any] =self.dummy_unet
__magic_name__ : Tuple =self.dummy_movq
__magic_name__ : List[str] =DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.00085 , beta_end=0.012 , clip_sample=__snake_case , set_alpha_to_one=__snake_case , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__snake_case , )
__magic_name__ : str ={
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def A__ ( self :str , __snake_case :Optional[Any] , __snake_case :int=0 ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__snake_case ) ).to(__snake_case )
__magic_name__ : Dict =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__snake_case )
# create init_image
__magic_name__ : str =floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
__magic_name__ : int =image.cpu().permute(0 , 2 , 3 , 1 )[0]
__magic_name__ : str =Image.fromarray(np.uinta(__snake_case ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create mask
__magic_name__ : Dict =np.ones((64, 64) , dtype=np.floataa )
__magic_name__ : Any =0
if str(__snake_case ).startswith("""mps""" ):
__magic_name__ : Dict =torch.manual_seed(__snake_case )
else:
__magic_name__ : Tuple =torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__magic_name__ : List[Any] ={
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Tuple ="""cpu"""
__magic_name__ : List[Any] =self.get_dummy_components()
__magic_name__ : Union[str, Any] =self.pipeline_class(**__snake_case )
__magic_name__ : Tuple =pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
__magic_name__ : Tuple =pipe(**self.get_dummy_inputs(__snake_case ) )
__magic_name__ : List[Any] =output.images
__magic_name__ : Any =pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
__magic_name__ : int =image[0, -3:, -3:, -1]
__magic_name__ : str =image_from_tuple[0, -3:, -3:, -1]
print(f"image.shape {image.shape}" )
assert image.shape == (1, 64, 64, 3)
__magic_name__ : Optional[Any] =np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def A__ ( self :Dict ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def A__ ( self :List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : List[str] =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
__magic_name__ : int =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__magic_name__ : List[Any] =np.ones((7_68, 7_68) , dtype=np.floataa )
__magic_name__ : Any =0
__magic_name__ : int ="""a hat"""
__magic_name__ : int =KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__snake_case )
__magic_name__ : Dict =KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
__magic_name__ : int =pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
__magic_name__ : Union[str, Any] =torch.Generator(device="""cpu""" ).manual_seed(0 )
__magic_name__ , __magic_name__ : Dict =pipe_prior(
__snake_case , generator=__snake_case , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
__magic_name__ : Optional[Any] =pipeline(
__snake_case , image=__snake_case , mask_image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , generator=__snake_case , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="""np""" , )
__magic_name__ : Optional[int] =output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
| 21 | 1 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser(
description=(
"Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="bert", choices=["bert"])
parser.add_argument("--model_name", default="bert-base-uncased", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
UpperCAmelCase_ : str = parser.parse_args()
if args.model_type == "bert":
UpperCAmelCase_ : List[str] = BertForMaskedLM.from_pretrained(args.model_name)
UpperCAmelCase_ : List[Any] = "bert"
else:
raise ValueError("args.model_type should be \"bert\".")
UpperCAmelCase_ : Optional[Any] = model.state_dict()
UpperCAmelCase_ : List[str] = {}
for w in ["word_embeddings", "position_embeddings"]:
UpperCAmelCase_ : str = state_dict[F"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
UpperCAmelCase_ : Tuple = state_dict[F"""{prefix}.embeddings.LayerNorm.{w}"""]
UpperCAmelCase_ : Any = 0
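    # copy six selected teacher layers (0, 2, 4, 7, 9, 11) into consecutive student layers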
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
UpperCAmelCase_ : List[Any] = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
UpperCAmelCase_ : List[str] = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
UpperCAmelCase_ : int = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
UpperCAmelCase_ : Optional[Any] = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
UpperCAmelCase_ : Union[str, Any] = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
UpperCAmelCase_ : List[str] = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
UpperCAmelCase_ : str = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
UpperCAmelCase_ : Dict = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
UpperCAmelCase_ : Any = state_dict["cls.predictions.decoder.weight"]
UpperCAmelCase_ : Tuple = state_dict["cls.predictions.bias"]
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCAmelCase_ : int = state_dict[F"""cls.predictions.transform.dense.{w}"""]
UpperCAmelCase_ : Optional[Any] = state_dict[F"""cls.predictions.transform.LayerNorm.{w}"""]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 21 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __A :
def __init__( self :int , __snake_case :List[Any] , __snake_case :List[Any]=2 , __snake_case :Dict=True , __snake_case :Tuple=False , __snake_case :List[str]=10 , __snake_case :List[str]=3 , __snake_case :Union[str, Any]=32 * 8 , __snake_case :Optional[int]=32 * 8 , __snake_case :Any=4 , __snake_case :Union[str, Any]=64 , ):
'''simple docstring'''
__magic_name__ : Optional[int] =parent
__magic_name__ : List[Any] =batch_size
__magic_name__ : List[str] =is_training
__magic_name__ : List[str] =use_auxiliary_loss
__magic_name__ : Union[str, Any] =num_queries
__magic_name__ : str =num_channels
__magic_name__ : Union[str, Any] =min_size
__magic_name__ : Union[str, Any] =max_size
__magic_name__ : Optional[int] =num_labels
__magic_name__ : Tuple =hidden_dim
__magic_name__ : Any =hidden_dim
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Optional[Any] =floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__snake_case )
__magic_name__ : List[Any] =torch.ones([self.batch_size, self.min_size, self.max_size] , device=__snake_case )
__magic_name__ : List[str] =(
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__snake_case ) > 0.5
).float()
__magic_name__ : Union[str, Any] =(torch.rand((self.batch_size, self.num_labels) , device=__snake_case ) > 0.5).long()
__magic_name__ : str =self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Dict =MaskaFormerConfig(
hidden_size=self.hidden_dim , )
__magic_name__ : str =self.num_queries
__magic_name__ : Dict =self.num_labels
__magic_name__ : int =[1, 1, 1, 1]
__magic_name__ : List[str] =self.num_channels
__magic_name__ : str =64
__magic_name__ : List[str] =1_28
__magic_name__ : Optional[Any] =self.hidden_dim
__magic_name__ : Tuple =self.hidden_dim
__magic_name__ : Optional[int] =self.hidden_dim
return config
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Tuple =self.prepare_config_and_inputs()
__magic_name__ : Optional[Any] ={"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def A__ ( self :Union[str, Any] , __snake_case :Tuple , __snake_case :Dict ):
'''simple docstring'''
__magic_name__ : int =output.encoder_hidden_states
__magic_name__ : List[str] =output.pixel_decoder_hidden_states
__magic_name__ : int =output.transformer_decoder_hidden_states
        self.parent.assertEqual(len(__snake_case ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(__snake_case ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(__snake_case ) , config.decoder_layers )
    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskaformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_maskaformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=False)
    def test_maskaformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs)
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def A__ ( self :List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def A__ ( self :Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def A__ ( self :int ):
'''simple docstring'''
pass
    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = MaskaFormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=True)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"
    @cached_property
    def default_image_processor(self):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
    def test_inference_no_head(self):
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
    def test_inference_universal_segmentation_head(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
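

# Hedged usage sketch (added for illustration, not part of the original test
# suite): drives the same checkpoint end to end on a single image. The image
# path is a hypothetical stand-in; the post-processing call mirrors the
# image-processor API exercised in the tests above.
def maskaformer_instance_segmentation_sketch(image_path="path/to/image.png"):
    processor = MaskaFormerImageProcessor.from_pretrained("facebook/mask2former-swin-small-coco-instance")
    model = MaskaFormerForUniversalSegmentation.from_pretrained(
        "facebook/mask2former-swin-small-coco-instance"
    ).eval()
    image = Image.open(image_path).convert("RGB")
    inputs = processor(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # masks_queries_logits: (batch, num_queries, height // 4, width // 4)
    # class_queries_logits: (batch, num_queries, num_labels + 1)
    result = processor.post_process_instance_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
    return result["segmentation"], result["segments_info"]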
| 21 | 1 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(train_dt, train_usr, train_mtch, test_dt, test_mtch):
    # ordinary least squares on rows [1, date, match]:
    # prediction = beta[0] + beta[1] * date + beta[2] * match
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2])
def sarimax_predictor(train_user, train_match, test_match):
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train, x_test, train_user):
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user):
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def data_safety_checker(list_vote, actual_result):
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
UpperCAmelCase_ : Tuple = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
UpperCAmelCase_ : str = pd.DataFrame(
data_input, columns=["total_user", "total_even", "days"]
)
UpperCAmelCase_ : Tuple = Normalizer().fit_transform(data_input_df.values)
# split data
UpperCAmelCase_ : Dict = normalize_df[:, 2].tolist()
UpperCAmelCase_ : List[str] = normalize_df[:, 0].tolist()
UpperCAmelCase_ : List[Any] = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
UpperCAmelCase_ : List[Any] = normalize_df[:, [1, 2]].tolist()
UpperCAmelCase_ : List[Any] = x[: len(x) - 1]
UpperCAmelCase_ : Tuple = x[len(x) - 1 :]
# for linear regression & sarimax
UpperCAmelCase_ : Dict = total_date[: len(total_date) - 1]
UpperCAmelCase_ : Optional[Any] = total_user[: len(total_user) - 1]
UpperCAmelCase_ : int = total_match[: len(total_match) - 1]
UpperCAmelCase_ : int = total_date[len(total_date) - 1 :]
UpperCAmelCase_ : Dict = total_user[len(total_user) - 1 :]
UpperCAmelCase_ : Optional[int] = total_match[len(total_match) - 1 :]
# voting system with forecasting
UpperCAmelCase_ : Union[str, Any] = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
UpperCAmelCase_ : Optional[Any] = "" if data_safety_checker(res_vote, tst_user) else "not "
print("Today's data is {not_str}safe.")
| 21 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"
    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4

    @property
    def default_onnx_opset(self):
        return 12
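

# Hedged usage sketch (added for illustration): builds a config and reads back
# the ONNX export metadata defined above. num_labels=150 is an assumed
# ADE20k-style value, not something this module mandates.
def segformer_onnx_metadata_sketch():
    config = SegformerConfig(num_labels=150)
    onnx_config = SegformerOnnxConfig(config)
    # (pixel_values axes, 1e-4, 12) per the properties above
    return onnx_config.inputs, onnx_config.atol_for_validation, onnx_config.default_onnx_opset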
| 21 | 1 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1_008)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_008)
    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
"""input_ids""": [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
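

# Hedged usage sketch (added for illustration, outside the test class): a quick
# round trip through the slow tokenizer checkpoint exercised above; the ids
# shown match the easy-symbols test.
def xglm_roundtrip_sketch():
    tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    ids = tokenizer.encode("Hello World!")  # [2, 31227, 4447, 35]
    return tokenizer.decode(ids, skip_special_tokens=True)  # "Hello World!"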
| 21 |
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : Optional[int] = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 21 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
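

# Hedged usage sketch (added for illustration): a deliberately tiny
# configuration, with assumed values far smaller than the 54B checkpoint,
# to make the routing hyper-parameters validated above easy to inspect.
def tiny_nllb_moe_config_sketch():
    config = NllbMoeConfig(d_model=64, encoder_layers=2, decoder_layers=2, num_experts=4)
    return config.router_dtype, config.expert_capacity  # ("float32", 64)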
| 21 |
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}
def next_term(a_i, k, i, n):
    # a_i represents the term as b * 10^k + c; ds_b is digitsum(b)
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(new_c, k, a_i)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)
def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(addend, k, a_i)
    return diff, i - start_i
def add(addend, k, digits):
    # adds addend into the digit array `digits`, starting at index k
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, s = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            addend = addend // 10
        digits[j] = s

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
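

# Hedged brute-force cross-check (helper added for illustration, not part of the
# original solution): iterates a(i+1) = a(i) + digitsum(a(i)) directly and should
# agree with solution() for small n, e.g. both give 62 for n = 10.
def brute_force_solution(n: int) -> int:
    a_n = 1
    for _ in range(1, n):
        a_n += sum(int(digit) for digit in str(a_n))
    return a_n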
if __name__ == "__main__":
print(F"""{solution() = }""")
| 21 | 1 |