import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}


class WavLMConfig(PretrainedConfig):
    """Configuration class to store the configuration of a WavLM model."""

    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
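# Usage sketch (editor's addition, hedged): inside the `transformers` package this
# config is normally built directly or via `from_pretrained`, e.g.
#
#     from transformers import WavLMConfig
#     config = WavLMConfig(num_hidden_layers=6)
#     # `inputs_to_logits_ratio` is the product of the conv strides: 5 * 2**6 = 320
#     assert config.inputs_to_logits_ratio == 320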
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()


def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)

    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
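# Editor's note (hedged sketch): the idiom these tests exercise is the `accumulate`
# context manager. A typical training loop using it looks roughly like this:
#
#     accelerator = Accelerator(gradient_accumulation_steps=2)
#     model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#     for batch, target in dataloader:
#         with accelerator.accumulate(model):
#             loss = F.mse_loss(model(batch), target)
#             accelerator.backward(loss)
#             optimizer.step()
#             optimizer.zero_grad()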
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """
    Video classification pipeline using any `AutoModelForVideoClassification`. This pipeline predicts the class of a
    video.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
from ....utils import logging
logger = logging.get_logger(__name__)


class MMBTConfig:
    """Configuration that overlays multimodal settings on top of an existing text config."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
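# Usage sketch (editor's addition, hedged): the wrapper copies an existing text
# config's attributes, so it is typically built from one, e.g.
#
#     from transformers import BertConfig
#     config = MMBTConfig(BertConfig(), num_labels=2)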
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )

    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        batch_outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )

        self.assertEqual(
            nested_simplify(batch_outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        batch_outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )

        self.assertEqual(
            nested_simplify(batch_outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        batch_outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )

        self.assertEqual(
            nested_simplify(batch_outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)


@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(input_image, "anime turtle", num_inference_steps=2, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a second-order low-pass biquad filter
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a second-order high-pass biquad filter
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a second-order band-pass biquad filter
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a second-order all-pass biquad filter
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a peak (bell) biquad filter
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a low-shelf biquad filter
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a high-shelf biquad filter
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
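# Usage sketch (editor's addition, hedged): this assumes `IIRFilter.process`
# filters one sample at a time, so a signal is filtered by streaming samples
# through the filter.
if __name__ == "__main__":
    lowpass = make_lowpass(1_000, 48_000)
    samples = [0.0, 1.0, 0.5, -0.5, -1.0]
    filtered = [lowpass.process(sample) for sample in samples]
    print(filtered)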
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """
    Utility class for CLIP embeddings. Combines the image and text embeddings into a format usable by the decoder.
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
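# Shape sketch (editor's addition, hedged; dimensions are illustrative, not from this file):
#
#     proj = UnCLIPTextProjModel(
#         clip_extra_context_tokens=4, clip_embeddings_dim=768,
#         time_embed_dim=1536, cross_attention_dim=2048,
#     )
#     hidden_states, time_embeds = proj(
#         image_embeddings=torch.randn(2, 768),
#         prompt_embeds=torch.randn(2, 768),
#         text_encoder_hidden_states=torch.randn(2, 77, 768),
#         do_classifier_free_guidance=False,
#     )
#     # hidden_states: (2, 4 + 77, 2048); time_embeds: (2, 1536)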
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@dataclass
class BitsAndBytesConfig:
    """Wrapper class for the attributes and features of `bitsandbytes` quantization."""

    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()

    def post_init(self):
        """Safety checker that the arguments are correct."""
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")
        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )

    def is_quantizable(self):
        """Returns `True` if the model is quantizable, `False` otherwise."""
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        """Returns the quantization method used in the model."""
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff: bool = True):
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()

        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
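# Usage sketch (editor's addition, hedged): a typical 4-bit NF4 setup passed to
# `from_pretrained` via `quantization_config` (the model name is illustrative):
#
#     quant_config = BitsAndBytesConfig(
#         load_in_4bit=True,
#         bnb_4bit_quant_type="nf4",
#         bnb_4bit_use_double_quant=True,
#         bnb_4bit_compute_dtype=torch.bfloat16,
#     )
#     model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=quant_config)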
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """Double Linked List Node built specifically for LRU Cache."""

    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Double Linked List built specifically for LRU Cache."""

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Adds the given node at the end of the list (before rear)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Removes and returns the given node from the list; returns None if the node is not attached."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """LRU Cache to store a given capacity of data."""

    # class variable to map decorated functions to their respective cache instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Returns the value for the input key and updates the Double Linked List."""
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Sets the value for the input key and updates the Double Linked List."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert self.list.remove(first_node) is not None  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128):
        """Decorator version of LRU Cache."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
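# A minimal standalone illustration of the LRU-decorator pattern above (the
# obfuscated names make the class awkward to call directly, so this uses the
# stdlib functools.lru_cache, which has the same eviction semantics; `fib` is
# a hypothetical example function, not part of the original module):
from functools import lru_cache

@lru_cache(maxsize=128)
def fib(n: int) -> int:
    return n if n < 2 else fib(n - 1) + fib(n - 2)

fib(30)
print(fib.cache_info())  # CacheInfo(hits=28, misses=31, maxsize=128, currsize=31)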
| 655 | 1 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 ):
__lowercase = None
if token is not None:
__lowercase = {"Accept": "application/vnd.github+json", "Authorization": F"""Bearer {token}"""}
# The id of a workflow (not of a workflow run)
__lowercase = "636036"
__lowercase = F"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
__lowercase = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
return result["workflow_runs"]
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = get_daily_ci_runs(_SCREAMING_SNAKE_CASE )
__lowercase = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
__lowercase = workflow_run["id"]
break
return workflow_run_id
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = get_last_daily_ci_runs(_SCREAMING_SNAKE_CASE )
if workflow_run_id is not None:
__lowercase = get_artifacts_links(worflow_run_id=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
__lowercase = artifacts_links[artifact_name]
download_artifact(
artifact_name=_SCREAMING_SNAKE_CASE , artifact_url=_SCREAMING_SNAKE_CASE , output_dir=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
get_last_daily_ci_artifacts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowercase = {}
for artifact_name in artifact_names:
__lowercase = os.path.join(_SCREAMING_SNAKE_CASE , F"""{artifact_name}.zip""" )
if os.path.isfile(_SCREAMING_SNAKE_CASE ):
__lowercase = {}
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z:
for filename in z.namelist():
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
# read the file
with z.open(_SCREAMING_SNAKE_CASE ) as f:
__lowercase = f.read().decode("UTF-8" )
return results
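# A self-contained sketch of the zip-reading step above (the argument below is
# a placeholder path). Note that zip directory entries are detected by a
# trailing "/"; os.path.isdir on a member name consults the local filesystem
# instead.
def read_text_files_from_zip(zip_path):
    """Return {member_name: decoded_text} for every regular file in a zip."""
    contents = {}
    with zipfile.ZipFile(zip_path) as z:
        for member in z.namelist():
            if not member.endswith("/"):  # skip directory entries
                with z.open(member) as f:
                    contents[member] = f.read().decode("UTF-8")
    return contents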
| 655 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
snake_case__ : Optional[Any] = logging.getLogger()
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = "\n".join(_SCREAMING_SNAKE_CASE )
Path(_SCREAMING_SNAKE_CASE ).open("w" ).writelines(_SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = """patrickvonplaten/t5-tiny-random"""
snake_case__ : int = """sshleifer/bart-tiny-random"""
snake_case__ : Union[str, Any] = """sshleifer/tiny-mbart"""
snake_case__ : List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class _A ( _lowercase ):
'''simple docstring'''
def _snake_case ( self : str , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowercase = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
__lowercase = input_file_name.parent / "utest_output.txt"
assert not output_file_name.exists()
__lowercase = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
_dump_articles(lowerCamelCase , lowerCamelCase )
__lowercase = str(Path(self.get_auto_remove_tmp_dir() ) / "scores.json" )
__lowercase = "translation_en_to_de" if model == T5_TINY else "summarization"
__lowercase = f"""
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
""".split()
with patch.object(lowerCamelCase , "argv" , lowerCamelCase ):
run_generate()
assert Path(lowerCamelCase ).exists()
# os.remove(Path(output_file_name))
def _snake_case ( self : Dict ):
'''simple docstring'''
self.run_eval_tester(lowerCamelCase )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def _snake_case ( self : Optional[Any] , lowerCamelCase : str ):
'''simple docstring'''
self.run_eval_tester(lowerCamelCase )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def _snake_case ( self : Optional[Any] , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowercase = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
__lowercase = input_file_name.parent / "utest_output.txt"
assert not output_file_name.exists()
__lowercase = {
"en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
"de": [
"Maschinelles Lernen ist großartig, oder?",
"Ich esse gerne Bananen",
"Morgen ist wieder ein toller Tag!",
],
}
__lowercase = Path(self.get_auto_remove_tmp_dir() )
__lowercase = str(tmp_dir / "scores.json" )
__lowercase = str(tmp_dir / "val.target" )
_dump_articles(lowerCamelCase , text["en"] )
_dump_articles(lowerCamelCase , text["de"] )
__lowercase = "translation_en_to_de" if model == T5_TINY else "summarization"
__lowercase = f"""
run_eval_search.py
{model}
{str(lowerCamelCase )}
{str(lowerCamelCase )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
""".split()
testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"] )
with patch.object(lowerCamelCase , "argv" , lowerCamelCase ):
with CaptureStdout() as cs:
run_search()
__lowercase = [" num_beams | length_penalty", model, "Best score args"]
__lowercase = ["Info"]
if "translation" in task:
expected_strings.append("bleu" )
else:
expected_strings.extend(lowerCamelCase )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(lowerCamelCase ).exists()
os.remove(Path(lowerCamelCase ) )
| 655 | 1 |
import inspect
import unittest
class _A ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : List[Any] ):
'''simple docstring'''
try:
import diffusers # noqa: F401
except ImportError:
assert False
def _snake_case ( self : List[Any] ):
'''simple docstring'''
import diffusers
from diffusers.dependency_versions_table import deps
__lowercase = inspect.getmembers(lowerCamelCase , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
__lowercase = "k-diffusion"
elif backend == "invisible_watermark":
__lowercase = "invisible-watermark"
assert backend in deps, f"""{backend} is not in the deps table!"""
| 655 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class _A :
'''simple docstring'''
_snake_case : int
_snake_case : TreeNode | None = None
_snake_case : TreeNode | None = None
snake_case__ : Dict = namedtuple("""CoinsDistribResult""", """moves excess""")
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
if root is None:
return 0
# Validation
def count_nodes(_SCREAMING_SNAKE_CASE ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(_SCREAMING_SNAKE_CASE ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(_SCREAMING_SNAKE_CASE ) != count_coins(_SCREAMING_SNAKE_CASE ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(_SCREAMING_SNAKE_CASE ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
__lowercase , __lowercase = get_distrib(node.left )
__lowercase , __lowercase = get_distrib(node.right )
__lowercase = 1 - left_distrib_excess
__lowercase = 1 - right_distrib_excess
__lowercase = (
left_distrib_moves
+ right_distrib_moves
+ abs(_SCREAMING_SNAKE_CASE )
+ abs(_SCREAMING_SNAKE_CASE )
)
__lowercase = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return get_distrib(_SCREAMING_SNAKE_CASE )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
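# A standalone worked example of the same excess-propagation idea, in the
# standard formulation (excess = coins - nodes per subtree; moves accumulate
# |excess| over every edge). It mirrors, not calls, the classes above.
def _moves_and_excess(node):
    """`node` is a (coins, left, right) tuple or None; returns (moves, excess)."""
    if node is None:
        return 0, 0
    left_moves, left_excess = _moves_and_excess(node[1])
    right_moves, right_excess = _moves_and_excess(node[2])
    moves = left_moves + right_moves + abs(left_excess) + abs(right_excess)
    return moves, node[0] + left_excess + right_excess - 1

# Root holds 3 coins, both leaves hold 0: one coin travels down each edge.
assert _moves_and_excess((3, (0, None, None), (0, None, None)))[0] == 2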
| 655 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
snake_case__ : Optional[int] = False
class _A ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _snake_case ( self : Any ):
'''simple docstring'''
return 12
@property
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
return 12
@property
def _snake_case ( self : int ):
'''simple docstring'''
return 32
@property
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowercase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def _snake_case ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowerCamelCase )
@property
def _snake_case ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = 12
__lowercase = 12
__lowercase = {
"attention_bias": True,
"cross_attention_dim": 32,
"attention_head_dim": height * width,
"num_attention_heads": 1,
"num_vector_embeds": self.num_embed,
"num_embeds_ada_norm": self.num_embeds_ada_norm,
"norm_num_groups": 32,
"sample_size": width,
"activation_fn": "geglu-approximate",
}
__lowercase = TransformeraDModel(**lowerCamelCase )
return model
def _snake_case ( self : Any ):
'''simple docstring'''
__lowercase = "cpu"
__lowercase = self.dummy_vqvae
__lowercase = self.dummy_text_encoder
__lowercase = self.dummy_tokenizer
__lowercase = self.dummy_transformer
__lowercase = VQDiffusionScheduler(self.num_embed )
__lowercase = LearnedClassifierFreeSamplingEmbeddings(learnable=lowerCamelCase )
__lowercase = VQDiffusionPipeline(
vqvae=lowerCamelCase , text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , transformer=lowerCamelCase , scheduler=lowerCamelCase , learned_classifier_free_sampling_embeddings=lowerCamelCase , )
__lowercase = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__lowercase = "teddy bear playing in the pool"
__lowercase = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
__lowercase = pipe([prompt] , generator=lowerCamelCase , num_inference_steps=2 , output_type="np" )
__lowercase = output.images
__lowercase = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
__lowercase = pipe(
[prompt] , generator=lowerCamelCase , output_type="np" , return_dict=lowerCamelCase , num_inference_steps=2 )[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowercase = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self : Dict ):
'''simple docstring'''
__lowercase = "cpu"
__lowercase = self.dummy_vqvae
__lowercase = self.dummy_text_encoder
__lowercase = self.dummy_tokenizer
__lowercase = self.dummy_transformer
__lowercase = VQDiffusionScheduler(self.num_embed )
__lowercase = LearnedClassifierFreeSamplingEmbeddings(
learnable=lowerCamelCase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
__lowercase = VQDiffusionPipeline(
vqvae=lowerCamelCase , text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , transformer=lowerCamelCase , scheduler=lowerCamelCase , learned_classifier_free_sampling_embeddings=lowerCamelCase , )
__lowercase = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__lowercase = "teddy bear playing in the pool"
__lowercase = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
__lowercase = pipe([prompt] , generator=lowerCamelCase , num_inference_steps=2 , output_type="np" )
__lowercase = output.images
__lowercase = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
__lowercase = pipe(
[prompt] , generator=lowerCamelCase , output_type="np" , return_dict=lowerCamelCase , num_inference_steps=2 )[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowercase = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowercase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" )
__lowercase = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" )
__lowercase = pipeline.to(lowerCamelCase )
pipeline.set_progress_bar_config(disable=lowerCamelCase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
__lowercase = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
__lowercase = pipeline(
"teddy bear playing in the pool" , num_images_per_prompt=1 , generator=lowerCamelCase , output_type="np" , )
__lowercase = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 655 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = SwinvaConfig()
__lowercase = swinva_name.split("_" )
__lowercase = name_split[1]
if "to" in name_split[3]:
__lowercase = int(name_split[3][-3:] )
else:
__lowercase = int(name_split[3] )
if "to" in name_split[2]:
__lowercase = int(name_split[2][-2:] )
else:
__lowercase = int(name_split[2][6:] )
if model_size == "tiny":
__lowercase = 9_6
__lowercase = (2, 2, 6, 2)
__lowercase = (3, 6, 1_2, 2_4)
elif model_size == "small":
__lowercase = 9_6
__lowercase = (2, 2, 1_8, 2)
__lowercase = (3, 6, 1_2, 2_4)
elif model_size == "base":
__lowercase = 1_2_8
__lowercase = (2, 2, 1_8, 2)
__lowercase = (4, 8, 1_6, 3_2)
else:
__lowercase = 1_9_2
__lowercase = (2, 2, 1_8, 2)
__lowercase = (6, 1_2, 2_4, 4_8)
if "to" in swinva_name:
__lowercase = (1_2, 1_2, 1_2, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
__lowercase = 2_1_8_4_1
__lowercase = "huggingface/label-files"
__lowercase = "imagenet-22k-id2label.json"
__lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
__lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
else:
__lowercase = 1_0_0_0
__lowercase = "huggingface/label-files"
__lowercase = "imagenet-1k-id2label.json"
__lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
__lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
__lowercase = img_size
__lowercase = num_classes
__lowercase = embed_dim
__lowercase = depths
__lowercase = num_heads
__lowercase = window_size
return config
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
if "patch_embed.proj" in name:
__lowercase = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
__lowercase = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
__lowercase = "encoder." + name
if "attn.proj" in name:
__lowercase = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
__lowercase = name.replace("attn" , "attention.self" )
if "norm1" in name:
__lowercase = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
__lowercase = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
__lowercase = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__lowercase = name.replace("mlp.fc2" , "output.dense" )
if "q_bias" in name:
__lowercase = name.replace("q_bias" , "query.bias" )
if "k_bias" in name:
__lowercase = name.replace("k_bias" , "key.bias" )
if "v_bias" in name:
__lowercase = name.replace("v_bias" , "value.bias" )
if "cpb_mlp" in name:
__lowercase = name.replace("cpb_mlp" , "continuous_position_bias_mlp" )
if name == "norm.weight":
__lowercase = "layernorm.weight"
if name == "norm.bias":
__lowercase = "layernorm.bias"
if "head" in name:
__lowercase = name.replace("head" , "classifier" )
else:
__lowercase = "swinv2." + name
return name
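# Example mapping, traced through the replacements above:
# "layers.0.blocks.0.attn.proj.weight"
# -> "swinv2.encoder.layers.0.blocks.0.attention.output.dense.weight"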
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for key in orig_state_dict.copy().keys():
__lowercase = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
if "mask" in key:
continue
elif "qkv" in key:
__lowercase = key.split("." )
__lowercase = int(key_split[1] )
__lowercase = int(key_split[3] )
__lowercase = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__lowercase = val[:dim, :]
__lowercase = val[dim : dim * 2, :]
__lowercase = val[-dim:, :]
else:
__lowercase = val[:dim]
__lowercase = val[
dim : dim * 2
]
__lowercase = val[-dim:]
else:
__lowercase = val
return orig_state_dict
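# The fused timm "qkv" tensor stacks query, key and value along dim 0, which
# is why the weight is sliced into three equal row blocks above. A shape-only
# sketch (the dimension 96 is illustrative, not the real model's):
# qkv = torch.randn(3 * 96, 96)               # fused projection
# q, k, v = qkv[:96], qkv[96:192], qkv[-96:]  # each (96, 96)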
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE )
timm_model.eval()
__lowercase = get_swinva_config(_SCREAMING_SNAKE_CASE )
__lowercase = SwinvaForImageClassification(_SCREAMING_SNAKE_CASE )
model.eval()
__lowercase = convert_state_dict(timm_model.state_dict() , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
__lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowercase = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_" , "-" ) ) )
__lowercase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
__lowercase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" )
__lowercase = timm_model(inputs["pixel_values"] )
__lowercase = model(**_SCREAMING_SNAKE_CASE ).logits
assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 )
print(F"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
model.push_to_hub(
repo_path_or_name=Path(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , organization="nandwalritik" , commit_message="Add model" , )
if __name__ == "__main__":
snake_case__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swinv2_name""",
default="""swinv2_tiny_patch4_window8_256""",
type=str,
help="""Name of the Swinv2 timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
snake_case__ : str = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 655 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case__ : Union[str, Any] = {
"""configuration_albert""": ["""ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AlbertConfig""", """AlbertOnnxConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Tuple = ["""AlbertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Any = ["""AlbertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : List[Any] = [
"""ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AlbertForMaskedLM""",
"""AlbertForMultipleChoice""",
"""AlbertForPreTraining""",
"""AlbertForQuestionAnswering""",
"""AlbertForSequenceClassification""",
"""AlbertForTokenClassification""",
"""AlbertModel""",
"""AlbertPreTrainedModel""",
"""load_tf_weights_in_albert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : List[Any] = [
"""TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAlbertForMaskedLM""",
"""TFAlbertForMultipleChoice""",
"""TFAlbertForPreTraining""",
"""TFAlbertForQuestionAnswering""",
"""TFAlbertForSequenceClassification""",
"""TFAlbertForTokenClassification""",
"""TFAlbertMainLayer""",
"""TFAlbertModel""",
"""TFAlbertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Tuple = [
"""FlaxAlbertForMaskedLM""",
"""FlaxAlbertForMultipleChoice""",
"""FlaxAlbertForPreTraining""",
"""FlaxAlbertForQuestionAnswering""",
"""FlaxAlbertForSequenceClassification""",
"""FlaxAlbertForTokenClassification""",
"""FlaxAlbertModel""",
"""FlaxAlbertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
snake_case__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 655 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
snake_case__ : List[str] = logging.get_logger(__name__)
snake_case__ : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
snake_case__ : Optional[Any] = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
snake_case__ : List[str] = {
"""allenai/led-base-16384""": 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def snake_case_ ( ):
__lowercase = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
__lowercase = bs[:]
__lowercase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_SCREAMING_SNAKE_CASE )
cs.append(2**8 + n )
n += 1
__lowercase = [chr(_SCREAMING_SNAKE_CASE ) for n in cs]
return dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
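# Sanity note on the table built above (per the "Copied from" marker this is
# the GPT-2/BART `bytes_to_unicode`): it maps printable bytes to themselves
# and the remaining bytes to codepoints starting at 256, giving a bijection
# over all 256 byte values so BPE never sees raw whitespace/control bytes.
# table = bytes_to_unicode()  # assuming the original, unmangled name
# assert len(table) == 256 == len(set(table.values()))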
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = set()
__lowercase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowercase = char
return pairs
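# Example (assuming the mangled def above is `get_pairs`, as the call in the
# bpe method suggests): the symbol tuple ("h", "e", "l", "l", "o") yields
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.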
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : List[str] = VOCAB_FILES_NAMES
_snake_case : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Union[str, Any] = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , lowerCamelCase : Tuple , lowerCamelCase : Tuple , lowerCamelCase : Optional[int]="replace" , lowerCamelCase : Dict="<s>" , lowerCamelCase : Dict="</s>" , lowerCamelCase : Optional[Any]="</s>" , lowerCamelCase : Any="<s>" , lowerCamelCase : List[str]="<unk>" , lowerCamelCase : Union[str, Any]="<pad>" , lowerCamelCase : Any="<mask>" , lowerCamelCase : str=False , **lowerCamelCase : Optional[Any] , ):
'''simple docstring'''
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else bos_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else eos_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else sep_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else cls_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else unk_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
super().__init__(
errors=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , )
with open(lowerCamelCase , encoding="utf-8" ) as vocab_handle:
__lowercase = json.load(lowerCamelCase )
__lowercase = {v: k for k, v in self.encoder.items()}
__lowercase = errors # how to handle errors in decoding
__lowercase = bytes_to_unicode()
__lowercase = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase , encoding="utf-8" ) as merges_handle:
__lowercase = merges_handle.read().split("\n" )[1:-1]
__lowercase = [tuple(merge.split() ) for merge in bpe_merges]
__lowercase = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
__lowercase = {}
__lowercase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__lowercase = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
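# e.g. this pattern splits "Hello there!" into ["Hello", " there", "!"]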
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
return len(self.encoder )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self : List[Any] , lowerCamelCase : str ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
__lowercase = tuple(lowerCamelCase )
__lowercase = get_pairs(lowerCamelCase )
if not pairs:
return token
while True:
__lowercase = min(lowerCamelCase , key=lambda lowerCamelCase : self.bpe_ranks.get(lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__lowercase , __lowercase = bigram
__lowercase = []
__lowercase = 0
while i < len(lowerCamelCase ):
try:
__lowercase = word.index(lowerCamelCase , lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowercase = j
if word[i] == first and i < len(lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowercase = tuple(lowerCamelCase )
__lowercase = new_word
if len(lowerCamelCase ) == 1:
break
else:
__lowercase = get_pairs(lowerCamelCase )
__lowercase = " ".join(lowerCamelCase )
__lowercase = word
return word
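# Worked example: with bpe_ranks = {("l", "l"): 0}, the word "hello" starts
# as ("h", "e", "l", "l", "o"); the top-ranked pair ("l", "l") merges first,
# giving "h e ll o", and the loop stops once no remaining pair is ranked.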
def _snake_case ( self : List[Any] , lowerCamelCase : Tuple ):
'''simple docstring'''
__lowercase = []
for token in re.findall(self.pat , lowerCamelCase ):
__lowercase = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase ).split(" " ) )
return bpe_tokens
def _snake_case ( self : Dict , lowerCamelCase : Optional[int] ):
'''simple docstring'''
return self.encoder.get(lowerCamelCase , self.encoder.get(self.unk_token ) )
def _snake_case ( self : str , lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return self.decoder.get(lowerCamelCase )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : int ):
'''simple docstring'''
__lowercase = "".join(lowerCamelCase )
__lowercase = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _snake_case ( self : Optional[Any] , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase = os.path.join(
lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__lowercase = os.path.join(
lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase , ensure_ascii=lowerCamelCase ) + "\n" )
__lowercase = 0
with open(lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
__lowercase = token_index
writer.write(" ".join(lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def _snake_case ( self : Tuple , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowercase = [self.cls_token_id]
__lowercase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
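# Resulting layout (BART/LED convention): a single sequence becomes
# <s> A </s>; a pair becomes <s> A </s></s> B </s>.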
def _snake_case ( self : str , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase )) + [1]
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]
def _snake_case ( self : int , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self : Dict , lowerCamelCase : Any , lowerCamelCase : Tuple=False , **lowerCamelCase : Any ):
'''simple docstring'''
__lowercase = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase ) > 0 and not text[0].isspace()):
__lowercase = " " + text
return (text, kwargs)
def _snake_case ( self : List[Any] , lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase : Optional[int] = None , lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , ):
'''simple docstring'''
__lowercase = super()._pad(
encoded_inputs=lowerCamelCase , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
__lowercase = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__lowercase = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as other (sequential) inputs.
__lowercase = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase )
if needs_to_be_padded:
__lowercase = len(lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__lowercase = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
__lowercase = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
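# A minimal sketch of the global-attention padding rule above: -1 marks
# padded positions (0 already means "local attention", so it cannot double as
# the pad value). Values are illustrative:
# batch = {"input_ids": [5, 6, 7, 1, 1], "global_attention_mask": [1, 0, 0]}
# difference = len(batch["input_ids"]) - len(batch["global_attention_mask"])
# batch["global_attention_mask"] += [-1] * difference  # right padding
# -> [1, 0, 0, -1, -1]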
| 655 | 1 |
from torch import nn
class _A ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , lowerCamelCase : Dict , lowerCamelCase : Optional[int] ):
'''simple docstring'''
super().__init__()
__lowercase = class_size
__lowercase = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
__lowercase = nn.Linear(lowerCamelCase , lowerCamelCase )
def _snake_case ( self : Dict , lowerCamelCase : Any ):
'''simple docstring'''
__lowercase = self.mlp(lowerCamelCase )
return logits
| 655 |
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError("The length of profit and weight must be same." )
if max_weight <= 0:
raise ValueError("max_weight must greater than zero." )
if any(p < 0 for p in profit ):
raise ValueError("Profit can not be negative." )
if any(w < 0 for w in weight ):
raise ValueError("Weight can not be negative." )
# Build a list of the profit gained per 1 kg of each item,
# i.e. the profit/weight ratio for every element.
__lowercase = [p / w for p, w in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]
# Creating a copy of the list and sorting profit/weight in ascending order
__lowercase = sorted(_SCREAMING_SNAKE_CASE )
# declaring useful variables
__lowercase = len(_SCREAMING_SNAKE_CASE )
__lowercase = 0
__lowercase = 0
__lowercase = 0
# loop while the total weight has not reached the max limit (e.g. 15 kg) and i < length
while limit <= max_weight and i < length:
# pick the largest remaining profit/weight ratio from sorted_profit_by_weight
__lowercase = sorted_profit_by_weight[length - i - 1]
__lowercase = profit_by_weight.index(_SCREAMING_SNAKE_CASE )
__lowercase = -1
# check whether the item's full weight still fits within the
# remaining capacity.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Add the full profit for this item, since
# 1 == weight[index] / weight[index]
gain += 1 * profit[index]
else:
# The item does not fully fit, so take only the remaining capacity
# and add the proportional profit:
# (max_weight - limit) / weight[index] * profit[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
snake_case__ : str = [int(x) for x in input("""Input profits separated by spaces: """).split()]
snake_case__ : str = [int(x) for x in input("""Input weights separated by spaces: """).split()]
snake_case__ : Optional[Any] = int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight)
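# A compact standalone reference version of the same greedy idea for
# comparison (sort once by ratio, descending, instead of repeated index
# lookups; assumes positive weights):
def fractional_knapsack(profit, weight, max_weight):
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    gain, remaining = 0.0, max_weight
    for p, w in items:
        if remaining <= 0:
            break
        take = min(w, remaining)
        gain += p * take / w
        remaining -= take
    return gain

# fractional_knapsack([60, 100, 120], [10, 20, 30], 50) == 240.0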
| 655 | 1 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = []
for part_id in partition_order:
__lowercase = df.where(F"""SPARK_PARTITION_ID() = {part_id}""" ).collect()
for row_idx, row in enumerate(_SCREAMING_SNAKE_CASE ):
expected_row_ids_and_row_dicts.append((F"""{part_id}_{row_idx}""", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def snake_case_ ( ):
__lowercase = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
__lowercase = spark.range(1_0_0 ).repartition(1 )
__lowercase = Spark(_SCREAMING_SNAKE_CASE )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=1_6 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
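# (100 rows x 8 bytes = 800 bytes of data; 800 / 16-byte shards = 50 partitions.)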
assert spark_builder.df.rdd.getNumPartitions() == 5_0
@require_not_windows
@require_dill_gt_0_3_2
def snake_case_ ( ):
__lowercase = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
__lowercase = spark.range(1_0 ).repartition(2 )
__lowercase = [1, 0]
__lowercase = _generate_iterable_examples(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Reverse the partitions.
__lowercase = _get_expected_row_ids_and_row_dicts_for_partition_order(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
__lowercase , __lowercase = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def snake_case_ ( ):
__lowercase = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
__lowercase = spark.range(1_0 ).repartition(1 )
__lowercase = SparkExamplesIterable(_SCREAMING_SNAKE_CASE )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_SCREAMING_SNAKE_CASE ):
assert row_id == F"""0_{i}"""
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def snake_case_ ( ):
__lowercase = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
__lowercase = spark.range(3_0 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator" ) as generator_mock:
__lowercase = lambda _SCREAMING_SNAKE_CASE : x.reverse()
__lowercase = _get_expected_row_ids_and_row_dicts_for_partition_order(_SCREAMING_SNAKE_CASE , [2, 1, 0] )
__lowercase = SparkExamplesIterable(_SCREAMING_SNAKE_CASE ).shuffle_data_sources(_SCREAMING_SNAKE_CASE )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_SCREAMING_SNAKE_CASE ):
__lowercase , __lowercase = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def snake_case_ ( ):
__lowercase = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
__lowercase = spark.range(2_0 ).repartition(4 )
# Partitions 0 and 2
__lowercase = SparkExamplesIterable(_SCREAMING_SNAKE_CASE ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
__lowercase = _get_expected_row_ids_and_row_dicts_for_partition_order(_SCREAMING_SNAKE_CASE , [0, 2] )
for i, (row_id, row_dict) in enumerate(_SCREAMING_SNAKE_CASE ):
__lowercase , __lowercase = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
__lowercase = SparkExamplesIterable(_SCREAMING_SNAKE_CASE ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
__lowercase = _get_expected_row_ids_and_row_dicts_for_partition_order(_SCREAMING_SNAKE_CASE , [1, 3] )
for i, (row_id, row_dict) in enumerate(_SCREAMING_SNAKE_CASE ):
__lowercase , __lowercase = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def snake_case_ ( ):
__lowercase = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
__lowercase = spark.range(1_0_0 ).repartition(1 )
__lowercase = Spark(_SCREAMING_SNAKE_CASE )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_0_0
| 655 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Dict = """openai/whisper-base"""
_snake_case : Union[str, Any] = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
_snake_case : Any = """transcriber"""
_snake_case : Any = WhisperProcessor
_snake_case : Optional[int] = WhisperForConditionalGeneration
_snake_case : str = ["""audio"""]
_snake_case : Optional[int] = ["""text"""]
def _snake_case ( self : List[str] , lowerCamelCase : Optional[int] ):
'''simple docstring'''
return self.pre_processor(lowerCamelCase , return_tensors="pt" ).input_features
def _snake_case ( self : str , lowerCamelCase : List[Any] ):
'''simple docstring'''
return self.model.generate(inputs=lowerCamelCase )
def _snake_case ( self : List[str] , lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return self.pre_processor.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )[0]
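# Hedged usage sketch of the three-stage PipelineTool flow above
# (encode -> forward -> decode); the class name and audio input below are
# placeholders for the obfuscated names:
# tool = SpeechToTextTool()        # i.e. the class defined above
# transcript = tool(audio_array)   # runs encode/forward/decode in order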
| 655 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class _A ( unittest.TestCase ):
'''simple docstring'''
_snake_case : Optional[int] = ViTImageProcessor if is_vision_available() else None
@property
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self : Any ):
'''simple docstring'''
__lowercase = (3, 32, 128)
__lowercase = tempfile.mkdtemp()
# fmt: off
__lowercase = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
__lowercase = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCamelCase ) + "\n" )
__lowercase = {
"do_normalize": False,
"do_resize": True,
"image_processor_type": "ViTImageProcessor",
"resample": 3,
"size": {"height": 32, "width": 128},
}
__lowercase = os.path.join(self.tmpdirname , lowerCamelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(lowerCamelCase , lowerCamelCase )
def _snake_case ( self : Union[str, Any] , **lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase )
def _snake_case ( self : str , **lowerCamelCase : List[Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase )
def _snake_case ( self : Any ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _snake_case ( self : int ):
'''simple docstring'''
__lowercase = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
__lowercase = Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) )
return image_input
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowercase = self.get_tokenizer()
__lowercase = self.get_image_processor()
__lowercase = MgpstrProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
__lowercase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase )
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowercase = self.get_tokenizer()
__lowercase = self.get_image_processor()
__lowercase = MgpstrProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
__lowercase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__lowercase = self.get_image_processor(do_normalize=lowerCamelCase , padding_value=1.0 )
__lowercase = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase )
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = MgpstrProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__lowercase = self.prepare_image_inputs()
__lowercase = image_processor(lowerCamelCase , return_tensors="np" )
__lowercase = processor(images=lowerCamelCase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = MgpstrProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__lowercase = "test"
__lowercase = processor(text=lowerCamelCase )
__lowercase = tokenizer(lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = MgpstrProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__lowercase = "test"
__lowercase = self.prepare_image_inputs()
__lowercase = processor(text=lowerCamelCase , images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = MgpstrProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
__lowercase = processor.char_decode(lowerCamelCase )
__lowercase = tokenizer.batch_decode(lowerCamelCase )
__lowercase = [seq.replace(" " , "" ) for seq in decoded_tok]
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = MgpstrProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__lowercase = None
__lowercase = self.prepare_image_inputs()
__lowercase = processor(text=lowerCamelCase , images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = MgpstrProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__lowercase = torch.randn(1 , 27 , 38 )
__lowercase = torch.randn(1 , 27 , 50_257 )
__lowercase = torch.randn(1 , 27 , 30_522 )
__lowercase = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
| 655 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class _A :
'''simple docstring'''
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
__lowercase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
__lowercase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _snake_case ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
__lowercase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
__lowercase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
__lowercase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = inputs["prompt"]
__lowercase = inputs["generator"]
__lowercase = inputs["num_inference_steps"]
__lowercase = inputs["output_type"]
if "image" in inputs:
__lowercase = inputs["image"]
else:
__lowercase = None
if "mask_image" in inputs:
__lowercase = inputs["mask_image"]
else:
__lowercase = None
if "original_image" in inputs:
__lowercase = inputs["original_image"]
else:
__lowercase = None
__lowercase , __lowercase = pipe.encode_prompt(lowerCamelCase )
# inputs with prompt converted to embeddings
__lowercase = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
__lowercase = image
if mask_image is not None:
__lowercase = mask_image
if original_image is not None:
__lowercase = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(lowerCamelCase , lowerCamelCase , lowerCamelCase )
__lowercase = pipe(**lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCamelCase )
__lowercase = self.pipeline_class.from_pretrained(lowerCamelCase )
pipe_loaded.to(lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=lowerCamelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowerCamelCase , lowerCamelCase ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , )
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = inputs["generator"]
__lowercase = inputs["num_inference_steps"]
__lowercase = inputs["output_type"]
# inputs with prompt converted to embeddings
__lowercase = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
__lowercase = image
if mask_image is not None:
__lowercase = mask_image
if original_image is not None:
__lowercase = original_image
__lowercase = pipe_loaded(**lowerCamelCase )[0]
__lowercase = np.abs(to_np(lowerCamelCase ) - to_np(lowerCamelCase ) ).max()
self.assertLess(lowerCamelCase , 1e-4 )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = pipe(**lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCamelCase )
__lowercase = self.pipeline_class.from_pretrained(lowerCamelCase )
pipe_loaded.to(lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=lowerCamelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = pipe_loaded(**lowerCamelCase )[0]
__lowercase = np.abs(to_np(lowerCamelCase ) - to_np(lowerCamelCase ) ).max()
self.assertLess(lowerCamelCase , 1e-4 )
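# Both tests above exercise the same save_pretrained -> from_pretrained round trip and
# assert that the max absolute difference between the original and reloaded pipeline
# outputs stays below 1e-4; AttnAddedKVProcessor is re-set after loading so the attention
# path is identical on both sides of the comparison.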
| 655 | 1 |
import argparse
import json
import subprocess
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = []
__lowercase = (
F"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""""
" https://api.github.com/repos/huggingface/transformers/actions/runners"
)
__lowercase = subprocess.run(_SCREAMING_SNAKE_CASE , shell=_SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE )
__lowercase = output.stdout.decode("utf-8" )
__lowercase = json.loads(_SCREAMING_SNAKE_CASE )
__lowercase = status["runners"]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(runner )
# save the result so we can report them on Slack
with open("offline_runners.txt" , "w" ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) )
if len(_SCREAMING_SNAKE_CASE ) > 0:
__lowercase = "\n".join([x["name"] for x in offline_runners] )
raise ValueError(F"""The following runners are offline:\n{failed}""" )
if __name__ == "__main__":
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
return values.split("," )
snake_case__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
snake_case__ : Union[str, Any] = parser.parse_args()
get_runner_status(args.target_runners, args.token)
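# Invocation sketch (the script name is illustrative; the flags match the argparse setup above):
#   python check_runner_status.py --target_runners runner-1,runner-2 --token <GITHUB_TOKEN>
# The token needs actions:read permission. Offline runners are dumped to
# offline_runners.txt for the Slack report, and a ValueError listing them is raised.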
| 655 |
import numpy as np
snake_case__ : Tuple = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
class _A :
'''simple docstring'''
def __init__( self : Dict ):
'''simple docstring'''
__lowercase = np.array(snake_case__ )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : str ):
'''simple docstring'''
__lowercase , __lowercase = np.where(letter == self.SQUARE )
__lowercase = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def _snake_case ( self : List[Any] , lowerCamelCase : int , lowerCamelCase : int ):
'''simple docstring'''
__lowercase = self.SQUARE[indexa - 1, indexa - 1]
return letter
def _snake_case ( self : int , lowerCamelCase : str ):
'''simple docstring'''
__lowercase = message.lower()
__lowercase = message.replace(" " , "" )
__lowercase = message.replace("j" , "i" )
__lowercase = np.empty((2, len(lowerCamelCase )) )
for letter_index in range(len(lowerCamelCase ) ):
__lowercase = self.letter_to_numbers(message[letter_index] )
__lowercase = numbers[0]
__lowercase = numbers[1]
__lowercase = first_step.reshape(2 * len(lowerCamelCase ) )
__lowercase = ""
for numbers_index in range(len(lowerCamelCase ) ):
__lowercase = int(second_step[numbers_index * 2] )
__lowercase = int(second_step[(numbers_index * 2) + 1] )
__lowercase = self.numbers_to_letter(lowerCamelCase , lowerCamelCase )
__lowercase = encoded_message + letter
return encoded_message
def _snake_case ( self : Optional[Any] , lowerCamelCase : str ):
'''simple docstring'''
__lowercase = message.lower()
__lowercase = message.replace(" " , "" )
__lowercase = np.empty(2 * len(lowerCamelCase ) )
for letter_index in range(len(lowerCamelCase ) ):
__lowercase = self.letter_to_numbers(message[letter_index] )
__lowercase = numbers[0]
__lowercase = numbers[1]
__lowercase = first_step.reshape((2, len(lowerCamelCase )) )
__lowercase = ""
for numbers_index in range(len(lowerCamelCase ) ):
__lowercase = int(second_step[0, numbers_index] )
__lowercase = int(second_step[1, numbers_index] )
__lowercase = self.numbers_to_letter(lowerCamelCase , lowerCamelCase )
__lowercase = decoded_message + letter
return decoded_message
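# The class above implements a Bifid-style cipher: each letter maps to its (row, column)
# in the 5x5 square, all row indices are written out followed by all column indices, and
# the combined stream is re-read in pairs. A minimal self-contained sketch of the encode
# step (assuming the same square and the usual j -> i merge; names are illustrative):
def bifid_encode_sketch(message: str) -> str:
    square = [
        ["a", "b", "c", "d", "e"],
        ["f", "g", "h", "i", "k"],
        ["l", "m", "n", "o", "p"],
        ["q", "r", "s", "t", "u"],
        ["v", "w", "x", "y", "z"],
    ]
    # map each letter to its 0-based (row, column) coordinates
    pos = {letter: (r, c) for r, row in enumerate(square) for c, letter in enumerate(row)}
    message = message.lower().replace(" ", "").replace("j", "i")
    rows = [pos[ch][0] for ch in message]
    cols = [pos[ch][1] for ch in message]
    stream = rows + cols  # all rows, then all columns
    # re-read the stream two digits at a time as new (row, column) coordinates
    return "".join(square[stream[i]][stream[i + 1]] for i in range(0, len(stream), 2))
# bifid_encode_sketch("message") yields a ciphertext of the same length; the decode
# method above reverses the process by splitting the stream back into two halves.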
| 655 | 1 |
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase , __lowercase = [], []
while len(_SCREAMING_SNAKE_CASE ) > 1:
__lowercase , __lowercase = min(_SCREAMING_SNAKE_CASE ), max(_SCREAMING_SNAKE_CASE )
start.append(_SCREAMING_SNAKE_CASE )
end.append(_SCREAMING_SNAKE_CASE )
collection.remove(_SCREAMING_SNAKE_CASE )
collection.remove(_SCREAMING_SNAKE_CASE )
end.reverse()
return start + collection + end
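# Despite being driven as `merge_sort` below, this is a two-way (min-max) selection sort:
# each pass removes the current minimum and maximum of the remaining items, e.g.
#   [5, 1, 4, 2, 3] -> start=[1], end=[5] -> start=[1, 2], end=[5, 4] -> [1, 2] + [3] + [4, 5]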
if __name__ == "__main__":
snake_case__ : Union[str, Any] = input("""Enter numbers separated by a comma:\n""").strip()
snake_case__ : Dict = [int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
| 655 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class _A ( ctypes.Structure ):
'''simple docstring'''
_snake_case : Optional[Any] = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)]
def snake_case_ ( ):
if os.name == "nt":
__lowercase = CursorInfo()
__lowercase = ctypes.windll.kernelaa.GetStdHandle(-1_1 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) )
__lowercase = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) )
elif os.name == "posix":
sys.stdout.write("\033[?25l" )
sys.stdout.flush()
def snake_case_ ( ):
if os.name == "nt":
__lowercase = CursorInfo()
__lowercase = ctypes.windll.kernelaa.GetStdHandle(-1_1 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) )
__lowercase = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) )
elif os.name == "posix":
sys.stdout.write("\033[?25h" )
sys.stdout.flush()
@contextmanager
def snake_case_ ( ):
try:
hide_cursor()
yield
finally:
show_cursor()
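# Usage sketch (the pre-obfuscation helper names are assumed): the POSIX branch relies on
# the ANSI escapes \033[?25l (hide) and \033[?25h (show), and the context manager
# restores the cursor even if the body raises.
#
#   with hide_cursor_ctx():   # hypothetical name for the @contextmanager above
#       run_long_task()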
| 655 | 1 |
from ...processing_utils import ProcessorMixin
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : int = """WhisperFeatureExtractor"""
_snake_case : Tuple = """WhisperTokenizer"""
def __init__( self : Union[str, Any] , lowerCamelCase : int , lowerCamelCase : List[str] ):
'''simple docstring'''
super().__init__(lowerCamelCase , lowerCamelCase )
__lowercase = self.feature_extractor
__lowercase = False
def _snake_case ( self : Union[str, Any] , lowerCamelCase : List[Any]=None , lowerCamelCase : Any=None , lowerCamelCase : Any=True ):
'''simple docstring'''
return self.tokenizer.get_decoder_prompt_ids(task=lowerCamelCase , language=lowerCamelCase , no_timestamps=lowerCamelCase )
def __call__( self : str , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Optional[int] ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase , **lowerCamelCase )
__lowercase = kwargs.pop("audio" , lowerCamelCase )
__lowercase = kwargs.pop("sampling_rate" , lowerCamelCase )
__lowercase = kwargs.pop("text" , lowerCamelCase )
if len(lowerCamelCase ) > 0:
__lowercase = args[0]
__lowercase = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
__lowercase = self.feature_extractor(lowerCamelCase , *lowerCamelCase , sampling_rate=lowerCamelCase , **lowerCamelCase )
if text is not None:
__lowercase = self.tokenizer(lowerCamelCase , **lowerCamelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__lowercase = encodings["input_ids"]
return inputs
def _snake_case ( self : Union[str, Any] , *lowerCamelCase : Optional[int] , **lowerCamelCase : Optional[int] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase , **lowerCamelCase )
def _snake_case ( self : Any , *lowerCamelCase : List[str] , **lowerCamelCase : List[str] ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase , **lowerCamelCase )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : List[Any]="np" ):
'''simple docstring'''
return self.tokenizer.get_prompt_ids(lowerCamelCase , return_tensors=lowerCamelCase )
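# Usage sketch with the public transformers API (the checkpoint id is illustrative);
# passing `audio` routes through the feature extractor and `text` through the tokenizer,
# mirroring the __call__ dispatch above:
#
#   from transformers import WhisperProcessor
#   processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#   inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="hello world", return_tensors="pt").input_ids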
| 655 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case__ : List[Any] = logging.get_logger(__name__)
snake_case__ : List[str] = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : List[Any] = """yolos"""
def __init__( self : Union[str, Any] , lowerCamelCase : Union[str, Any]=768 , lowerCamelCase : int=12 , lowerCamelCase : Union[str, Any]=12 , lowerCamelCase : Optional[Any]=3_072 , lowerCamelCase : Optional[int]="gelu" , lowerCamelCase : Dict=0.0 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : Any=0.02 , lowerCamelCase : Optional[Any]=1e-12 , lowerCamelCase : Optional[Any]=[512, 864] , lowerCamelCase : str=16 , lowerCamelCase : Dict=3 , lowerCamelCase : str=True , lowerCamelCase : List[Any]=100 , lowerCamelCase : Dict=True , lowerCamelCase : Dict=False , lowerCamelCase : List[str]=1 , lowerCamelCase : str=5 , lowerCamelCase : Any=2 , lowerCamelCase : str=5 , lowerCamelCase : Optional[int]=2 , lowerCamelCase : List[Any]=0.1 , **lowerCamelCase : List[Any] , ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = qkv_bias
__lowercase = num_detection_tokens
__lowercase = use_mid_position_embeddings
__lowercase = auxiliary_loss
# Hungarian matcher
__lowercase = class_cost
__lowercase = bbox_cost
__lowercase = giou_cost
# Loss coefficients
__lowercase = bbox_loss_coefficient
__lowercase = giou_loss_coefficient
__lowercase = eos_coefficient
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Dict = version.parse("""1.11""" )
@property
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _snake_case ( self : str ):
'''simple docstring'''
return 1e-4
@property
def _snake_case ( self : Tuple ):
'''simple docstring'''
return 12
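# Instantiation sketch using the public transformers names for the config defined above
# (model type "yolos"; values are illustrative):
#
#   from transformers import YolosConfig, YolosForObjectDetection
#   config = YolosConfig(num_detection_tokens=100, image_size=[512, 864])
#   model = YolosForObjectDetection(config)
#
# The ONNX config then exposes a `pixel_values` input with dynamic batch/channel/height/
# width axes, a validation tolerance of 1e-4, and a default opset of 12.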
| 655 | 1 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def snake_case_ ( ):
raise RuntimeError("CUDA out of memory." )
class _A ( nn.Module ):
'''simple docstring'''
def __init__( self : str ):
'''simple docstring'''
super().__init__()
__lowercase = nn.Linear(3 , 4 )
__lowercase = nn.BatchNormad(4 )
__lowercase = nn.Linear(4 , 5 )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : List[Any] ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(lowerCamelCase ) ) )
class _A ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : int ):
'''simple docstring'''
__lowercase = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowerCamelCase : Tuple ):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowerCamelCase , [128, 64, 32, 16, 8] )
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowercase = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowerCamelCase : str , lowerCamelCase : Dict ):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
__lowercase , __lowercase = mock_training_loop_function("hello" )
self.assertListEqual(lowerCamelCase , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, "hello"] )
def _snake_case ( self : int ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(lowerCamelCase : List[Any] ):
pass
with self.assertRaises(lowerCamelCase ) as cm:
mock_training_loop_function()
self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] )
def _snake_case ( self : Any ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowerCamelCase : str ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowerCamelCase ) as cm:
mock_training_loop_function()
self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : List[Any] ):
if batch_size != 8:
raise_fake_out_of_memory()
with self.assertRaises(lowerCamelCase ) as cm:
mock_training_loop_function(128 , "hello" , "world" )
self.assertIn("Batch size was passed into `f`" , cm.exception.args[0] )
self.assertIn("`f(arg1='hello', arg2='world')" , cm.exception.args[0] )
def _snake_case ( self : List[str] ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowerCamelCase : str ):
raise ValueError("Oops, we had an error!" )
with self.assertRaises(lowerCamelCase ) as cm:
mock_training_loop_function()
self.assertIn("Oops, we had an error!" , cm.exception.args[0] )
@require_cuda
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = torch.cuda.memory_allocated()
__lowercase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowerCamelCase )
__lowercase = release_memory(lowerCamelCase )
self.assertEqual(torch.cuda.memory_allocated() , lowerCamelCase )
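# The decorator under test can also be used directly in training code; a runnable sketch
# with the public accelerate API (the OOM is simulated, as in the tests above):
from accelerate.utils import find_executable_batch_size
@find_executable_batch_size(starting_batch_size=128)
def inner_training_loop(batch_size):
    if batch_size > 8:  # stand-in for a real CUDA OOM raised during a training step
        raise RuntimeError("CUDA out of memory.")
    return batch_size
# inner_training_loop() returns 8 after retrying with 128 -> 64 -> 32 -> 16 -> 8.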
| 655 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : Optional[int] = logging.get_logger(__name__)
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = MobileNetVaConfig(layer_norm_eps=0.0_0_1 )
if "_quant" in model_name:
raise ValueError("Quantized models are not supported." )
__lowercase = re.match(R"^mobilenet_v1_([^_]*)_([^_]*)$" , _SCREAMING_SNAKE_CASE )
if matches:
__lowercase = float(matches[1] )
__lowercase = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__lowercase = 1_0_0_1
__lowercase = "imagenet-1k-id2label.json"
__lowercase = "huggingface/label-files"
__lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
__lowercase = {int(k ) + 1: v for k, v in idalabel.items()}
__lowercase = "background"
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
return config
def snake_case_ ( ):
__lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowercase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
__lowercase = get_mobilenet_va_config(_SCREAMING_SNAKE_CASE )
# Load 🤗 model
__lowercase = MobileNetVaForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__lowercase = MobileNetVaImageProcessor(
crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 3_2} , )
__lowercase = image_processor(images=prepare_img() , return_tensors="pt" )
__lowercase = model(**_SCREAMING_SNAKE_CASE )
__lowercase = outputs.logits
assert logits.shape == (1, 1_0_0_1)
if model_name == "mobilenet_v1_1.0_224":
__lowercase = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] )
elif model_name == "mobilenet_v1_0.75_192":
__lowercase = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] )
else:
__lowercase = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
print("Pushing to the hub..." )
__lowercase = "google/" + model_name
image_processor.push_to_hub(_SCREAMING_SNAKE_CASE )
model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
snake_case__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
snake_case__ : Dict = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
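# Invocation sketch (the script name is illustrative; flags match the argparse definitions above):
#   python convert_mobilenet_v1_original_tf_checkpoint_to_pytorch.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf \
#       --push_to_hub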
| 655 | 1 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
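# Migration sketch implied by the deprecation message above:
#   # old (deprecated, removed after 0.22.0):
#   from diffusers.pipeline_utils import DiffusionPipeline
#   # new:
#   from diffusers.pipelines.pipeline_utils import DiffusionPipeline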
| 655 |
from __future__ import annotations
from typing import Any
class _A :
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCamelCase : int ):
'''simple docstring'''
__lowercase = num_of_nodes
__lowercase = []
__lowercase = {}
def _snake_case ( self : Dict , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int ):
'''simple docstring'''
self.m_edges.append([u_node, v_node, weight] )
def _snake_case ( self : List[Any] , lowerCamelCase : int ):
'''simple docstring'''
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : int ):
'''simple docstring'''
if self.m_component[u_node] != u_node:
for k in self.m_component:
__lowercase = self.find_component(k )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : list[int] , lowerCamelCase : int , lowerCamelCase : int ):
'''simple docstring'''
if component_size[u_node] <= component_size[v_node]:
__lowercase = v_node
component_size[v_node] += component_size[u_node]
self.set_component(lowerCamelCase )
elif component_size[u_node] >= component_size[v_node]:
__lowercase = self.find_component(lowerCamelCase )
component_size[u_node] += component_size[v_node]
self.set_component(lowerCamelCase )
def _snake_case ( self : Any ):
'''simple docstring'''
__lowercase = []
__lowercase = 0
__lowercase = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
__lowercase = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
__lowercase , __lowercase , __lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
__lowercase = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase , __lowercase , __lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(lowerCamelCase , lowerCamelCase , lowerCamelCase )
print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
__lowercase = [-1] * self.m_num_of_nodes
print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def snake_case_ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 655 | 1 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : Union[str, Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
snake_case__ : Optional[int] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = state_dict.pop(_SCREAMING_SNAKE_CASE )
__lowercase = val
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
__lowercase = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
__lowercase = value
else:
__lowercase = value
return new_state_dict
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = ""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__lowercase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
__lowercase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[:2_5_6, :]
__lowercase = in_proj_bias[:2_5_6]
__lowercase = in_proj_weight[2_5_6:5_1_2, :]
__lowercase = in_proj_bias[2_5_6:5_1_2]
__lowercase = in_proj_weight[-2_5_6:, :]
__lowercase = in_proj_bias[-2_5_6:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
__lowercase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
__lowercase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[:2_5_6, :]
__lowercase = in_proj_bias[:2_5_6]
__lowercase = in_proj_weight[2_5_6:5_1_2, :]
__lowercase = in_proj_bias[2_5_6:5_1_2]
__lowercase = in_proj_weight[-2_5_6:, :]
__lowercase = in_proj_bias[-2_5_6:]
# read in weights + bias of input projection layer of cross-attention
__lowercase = state_dict.pop(
F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
__lowercase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
__lowercase = in_proj_weight_cross_attn[:2_5_6, :]
__lowercase = in_proj_bias_cross_attn[:2_5_6]
__lowercase = in_proj_weight_cross_attn[2_5_6:5_1_2, :]
__lowercase = in_proj_bias_cross_attn[2_5_6:5_1_2]
__lowercase = in_proj_weight_cross_attn[-2_5_6:, :]
__lowercase = in_proj_bias_cross_attn[-2_5_6:]
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase , __lowercase = image.size
__lowercase = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowercase = 8_0_0 if "detection" in checkpoint_url else 1_0_0_0
__lowercase = target_max_size / current_max_size
__lowercase = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = F.to_tensor(_SCREAMING_SNAKE_CASE )
__lowercase = F.normalize(_SCREAMING_SNAKE_CASE , mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] )
return image
@torch.no_grad()
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
logger.info("Converting model..." )
# load original state dict
__lowercase = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location="cpu" )
# rename keys
for src, dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowercase = rename_backbone_keys(_SCREAMING_SNAKE_CASE )
# query, key and value matrices need special treatment
read_in_q_k_v(_SCREAMING_SNAKE_CASE )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__lowercase = "model."
for key in state_dict.copy().keys():
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
__lowercase = state_dict.pop(_SCREAMING_SNAKE_CASE )
__lowercase = val
# create HuggingFace model and load state dict
__lowercase = TableTransformerConfig(
backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
__lowercase = 1_5
__lowercase = 2
__lowercase = {0: "table", 1: "table rotated"}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
else:
__lowercase = 1_2_5
__lowercase = 6
__lowercase = {
0: "table",
1: "table column",
2: "table row",
3: "table column header",
4: "table projected row header",
5: "table spanning cell",
}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
__lowercase = DetrImageProcessor(
format="coco_detection" , max_size=8_0_0 if "detection" in checkpoint_url else 1_0_0_0 )
__lowercase = TableTransformerForObjectDetection(_SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
model.eval()
# verify our conversion
__lowercase = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
__lowercase = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=_SCREAMING_SNAKE_CASE )
__lowercase = Image.open(_SCREAMING_SNAKE_CASE ).convert("RGB" )
__lowercase = normalize(resize(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ).unsqueeze(0 )
__lowercase = model(_SCREAMING_SNAKE_CASE )
if "detection" in checkpoint_url:
__lowercase = (1, 1_5, 3)
__lowercase = torch.tensor(
[[-6.7_8_9_7, -1_6.9_9_8_5, 6.7_9_3_7], [-8.0_1_8_6, -2_2.2_1_9_2, 6.9_6_7_7], [-7.3_1_1_7, -2_1.0_7_0_8, 7.4_0_5_5]] )
__lowercase = torch.tensor([[0.4_8_6_7, 0.1_7_6_7, 0.6_7_3_2], [0.6_7_1_8, 0.4_4_7_9, 0.3_8_3_0], [0.4_7_1_6, 0.1_7_6_0, 0.6_3_6_4]] )
else:
__lowercase = (1, 1_2_5, 7)
__lowercase = torch.tensor(
[[-1_8.1_4_3_0, -8.3_2_1_4, 4.8_2_7_4], [-1_8.4_6_8_5, -7.1_3_6_1, -4.2_6_6_7], [-2_6.3_6_9_3, -9.3_4_2_9, -4.9_9_6_2]] )
__lowercase = torch.tensor([[0.4_9_8_3, 0.5_5_9_5, 0.9_4_4_0], [0.4_9_1_6, 0.6_3_1_5, 0.5_9_5_4], [0.6_1_0_8, 0.8_6_3_7, 0.1_1_3_5]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
# Push model to HF hub
logger.info("Pushing model to the hub..." )
__lowercase = (
"microsoft/table-transformer-detection"
if "detection" in checkpoint_url
else "microsoft/table-transformer-structure-recognition"
)
model.push_to_hub(_SCREAMING_SNAKE_CASE )
image_processor.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
snake_case__ : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
snake_case__ : List[str] = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
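# Invocation sketch (the script name is illustrative; flags match the argparse definitions above):
#   python convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection \
#       --push_to_hub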
| 655 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ : List[str] = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Dict = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
snake_case__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
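# With the lazy module in place, the public names resolve on first access; a usage sketch
# with the released transformers API (the checkpoint id is illustrative):
#
#   from transformers import MgpstrProcessor, MgpstrForSceneTextRecognition
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")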
| 655 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
snake_case__ : Union[str, Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class _A :
'''simple docstring'''
_snake_case : Optional[str] = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
_snake_case : Optional[str] = field(
default=_lowercase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
_snake_case : Optional[str] = field(
default=_lowercase , metadata={"""help""": """The column name of the images in the files."""} )
_snake_case : Optional[str] = field(default=_lowercase , metadata={"""help""": """A folder containing the training data."""} )
_snake_case : Optional[str] = field(default=_lowercase , metadata={"""help""": """A folder containing the validation data."""} )
_snake_case : Optional[float] = field(
default=0.1_5 , metadata={"""help""": """Percent to split off of train for validation."""} )
_snake_case : Optional[int] = field(
default=_lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
_snake_case : Optional[int] = field(
default=_lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def _snake_case ( self : Dict ):
'''simple docstring'''
__lowercase = {}
if self.train_dir is not None:
__lowercase = self.train_dir
if self.validation_dir is not None:
__lowercase = self.validation_dir
__lowercase = data_files if data_files else None
@dataclass
class _A :
'''simple docstring'''
_snake_case : str = field(
default=_lowercase , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
_snake_case : Optional[str] = field(
default=_lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
_snake_case : Optional[str] = field(
default=_lowercase , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
_snake_case : Optional[str] = field(
default=_lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
_snake_case : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
_snake_case : str = field(default=_lowercase , metadata={"""help""": """Name or path of preprocessor config."""} )
_snake_case : bool = field(
default=_lowercase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
_snake_case : float = field(
default=0.7_5 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
_snake_case : bool = field(
default=_lowercase , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : float = field(
default=1E-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = torch.stack([example["pixel_values"] for example in examples] )
return {"pixel_values": pixel_values}
def snake_case_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowercase , __lowercase , __lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_mae" , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__lowercase = training_args.get_process_log_level()
logger.setLevel(_SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(_SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__lowercase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowercase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset.
__lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
__lowercase = None if "validation" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , _SCREAMING_SNAKE_CASE ) and data_args.train_val_split > 0.0:
__lowercase = ds["train"].train_test_split(data_args.train_val_split )
__lowercase = split["train"]
__lowercase = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowercase = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
__lowercase = ViTMAEConfig.from_pretrained(model_args.config_name , **_SCREAMING_SNAKE_CASE )
elif model_args.model_name_or_path:
__lowercase = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **_SCREAMING_SNAKE_CASE )
else:
__lowercase = ViTMAEConfig()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
# adapt config
config.update(
{
"mask_ratio": model_args.mask_ratio,
"norm_pix_loss": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
__lowercase = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **_SCREAMING_SNAKE_CASE )
elif model_args.model_name_or_path:
__lowercase = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **_SCREAMING_SNAKE_CASE )
else:
__lowercase = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
__lowercase = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch" )
__lowercase = ViTMAEForPreTraining(_SCREAMING_SNAKE_CASE )
if training_args.do_train:
__lowercase = ds["train"].column_names
else:
__lowercase = ds["validation"].column_names
if data_args.image_column_name is not None:
__lowercase = data_args.image_column_name
elif "image" in column_names:
__lowercase = "image"
elif "img" in column_names:
__lowercase = "img"
else:
__lowercase = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
__lowercase = image_processor.size["shortest_edge"]
else:
__lowercase = (image_processor.size["height"], image_processor.size["width"])
__lowercase = Compose(
[
Lambda(lambda img : img.convert("RGB" ) if img.mode != "RGB" else img ),
RandomResizedCrop(_SCREAMING_SNAKE_CASE , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(_SCREAMING_SNAKE_CASE ):
__lowercase = [transforms(image ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
__lowercase = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(_SCREAMING_SNAKE_CASE )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
__lowercase = (
ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(_SCREAMING_SNAKE_CASE )
# Compute absolute learning rate
__lowercase = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
__lowercase = training_args.base_learning_rate * total_train_batch_size / 2_5_6
# Initialize our trainer
__lowercase = Trainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
__lowercase = None
if training_args.resume_from_checkpoint is not None:
__lowercase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__lowercase = last_checkpoint
__lowercase = trainer.train(resume_from_checkpoint=_SCREAMING_SNAKE_CASE )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__lowercase = trainer.evaluate()
trainer.log_metrics("eval" , _SCREAMING_SNAKE_CASE )
trainer.save_metrics("eval" , _SCREAMING_SNAKE_CASE )
# Write model card and (optionally) push to hub
__lowercase = {
"tasks": "masked-auto-encoding",
"dataset": data_args.dataset_name,
"tags": ["masked-auto-encoding"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_SCREAMING_SNAKE_CASE )
else:
trainer.create_model_card(**_SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
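# Launch sketch (argument names match the dataclasses above; values are illustrative):
#   python run_mae.py \
#       --dataset_name cifar10 \
#       --output_dir ./vit-mae-demo \
#       --do_train --do_eval \
#       --base_learning_rate 1.5e-4 \
#       --mask_ratio 0.75 --norm_pix_loss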
| 655 |
from __future__ import annotations
import bisect
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ):
if hi < 0:
__lowercase = len(_SCREAMING_SNAKE_CASE )
while lo < hi:
__lowercase = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__lowercase = mid + 1
else:
__lowercase = mid
return lo
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ):
if hi < 0:
__lowercase = len(_SCREAMING_SNAKE_CASE )
while lo < hi:
__lowercase = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__lowercase = mid + 1
else:
__lowercase = mid
return lo
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ):
sorted_collection.insert(bisect_left(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ):
sorted_collection.insert(bisect_right(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = 0
__lowercase = len(_SCREAMING_SNAKE_CASE ) - 1
while left <= right:
__lowercase = left + (right - left) // 2
__lowercase = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__lowercase = midpoint - 1
else:
__lowercase = midpoint + 1
return None
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = bisect.bisect_left(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if index != len(_SCREAMING_SNAKE_CASE ) and sorted_collection[index] == item:
return index
return None
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if right < left:
return None
__lowercase = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , midpoint - 1 )
else:
return binary_search_by_recursion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , midpoint + 1 , _SCREAMING_SNAKE_CASE )
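# Quick cross-check against the standard library (the iterative helpers above mirror
# bisect's insertion-point semantics):
#   >>> import bisect
#   >>> bisect.bisect_left([0, 5, 7, 10, 15], 6)
#   2
#   >>> bisect.bisect_right([0, 5, 7, 10, 15], 5)
#   2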
if __name__ == "__main__":
snake_case__ : Optional[Any] = input("""Enter numbers separated by comma:\n""").strip()
snake_case__ : Any = sorted(int(item) for item in user_input.split(""","""))
snake_case__ : Any = int(input("""Enter a single number to be found in the list:\n"""))
snake_case__ : List[Any] = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
| 655 | 1 |
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if not (isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )):
raise ValueError("longest_common_substring() takes two strings for inputs" )
__lowercase = len(_SCREAMING_SNAKE_CASE )
__lowercase = len(_SCREAMING_SNAKE_CASE )
__lowercase = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )]
__lowercase = 0
__lowercase = 0
for i in range(1 , texta_length + 1 ):
for j in range(1 , texta_length + 1 ):
if texta[i - 1] == texta[j - 1]:
__lowercase = 1 + dp[i - 1][j - 1]
if dp[i][j] > ans_length:
__lowercase = i
__lowercase = dp[i][j]
return texta[ans_index - ans_length : ans_index]
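# Worked example for the original two-string version of this routine: with inputs
# "abcdxyz" and "xyzabcd" the DP table peaks at length 4, so the call returns "abcd"
# (read back out of the first string via texta[ans_index - ans_length : ans_index]).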
if __name__ == "__main__":
import doctest
doctest.testmod()
| 655 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
snake_case__ : int = logging.get_logger(__name__)
snake_case__ : Optional[int] = {
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Dict = """conditional_detr"""
_snake_case : Union[str, Any] = ["""past_key_values"""]
_snake_case : Optional[int] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Optional[Any] , lowerCamelCase : int=True , lowerCamelCase : Tuple=None , lowerCamelCase : Optional[int]=3 , lowerCamelCase : Optional[int]=300 , lowerCamelCase : List[Any]=6 , lowerCamelCase : str=2_048 , lowerCamelCase : Any=8 , lowerCamelCase : List[str]=6 , lowerCamelCase : Any=2_048 , lowerCamelCase : List[Any]=8 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : List[str]=0.0 , lowerCamelCase : List[Any]=True , lowerCamelCase : str="relu" , lowerCamelCase : int=256 , lowerCamelCase : Dict=0.1 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : Dict=0.0 , lowerCamelCase : Tuple=0.02 , lowerCamelCase : int=1.0 , lowerCamelCase : Tuple=False , lowerCamelCase : List[str]="sine" , lowerCamelCase : List[Any]="resnet50" , lowerCamelCase : Any=True , lowerCamelCase : Any=False , lowerCamelCase : List[Any]=2 , lowerCamelCase : List[Any]=5 , lowerCamelCase : str=2 , lowerCamelCase : Dict=1 , lowerCamelCase : List[str]=1 , lowerCamelCase : Union[str, Any]=2 , lowerCamelCase : Dict=5 , lowerCamelCase : List[Any]=2 , lowerCamelCase : Tuple=0.25 , **lowerCamelCase : List[str] , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__lowercase = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase = backbone_config.get("model_type" )
__lowercase = CONFIG_MAPPING[backbone_model_type]
__lowercase = config_class.from_dict(lowerCamelCase )
__lowercase = use_timm_backbone
__lowercase = backbone_config
__lowercase = num_channels
__lowercase = num_queries
__lowercase = d_model
__lowercase = encoder_ffn_dim
__lowercase = encoder_layers
__lowercase = encoder_attention_heads
__lowercase = decoder_ffn_dim
__lowercase = decoder_layers
__lowercase = decoder_attention_heads
__lowercase = dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = activation_function
__lowercase = init_std
__lowercase = init_xavier_std
__lowercase = encoder_layerdrop
__lowercase = decoder_layerdrop
__lowercase = encoder_layers
__lowercase = auxiliary_loss
__lowercase = position_embedding_type
__lowercase = backbone
__lowercase = use_pretrained_backbone
__lowercase = dilation
# Hungarian matcher
__lowercase = class_cost
__lowercase = bbox_cost
__lowercase = giou_cost
# Loss coefficients
__lowercase = mask_loss_coefficient
__lowercase = dice_loss_coefficient
__lowercase = cls_loss_coefficient
__lowercase = bbox_loss_coefficient
__lowercase = giou_loss_coefficient
__lowercase = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase , **lowerCamelCase )
@property
def _snake_case ( self : Tuple ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def _snake_case ( self : str ):
'''simple docstring'''
return self.d_model
def _snake_case ( self : int ):
'''simple docstring'''
__lowercase = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
__lowercase = self.backbone_config.to_dict()
__lowercase = self.__class__.model_type
return output
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Any = version.parse("""1.11""" )
@property
def _snake_case ( self : Tuple ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _snake_case ( self : Any ):
'''simple docstring'''
return 1e-5
@property
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
return 12
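# Hedged usage sketch: with the un-obfuscated class name (ConditionalDetrConfig
# in transformers), the defaults above can be overridden per keyword, and the
# attribute_map makes hidden_size an alias of d_model:
#
#     from transformers import ConditionalDetrConfig
#     config = ConditionalDetrConfig(num_queries=100, d_model=256)
#     config.hidden_size  # 256, via the "hidden_size" -> "d_model" alias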
| 655 | 1 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _A ( unittest.TestCase ):
'''simple docstring'''
@property
def _snake_case ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowercase = self.dummy_uncond_unet
__lowercase = ScoreSdeVeScheduler()
__lowercase = ScoreSdeVePipeline(unet=lowerCamelCase , scheduler=lowerCamelCase )
sde_ve.to(lowerCamelCase )
sde_ve.set_progress_bar_config(disable=lowerCamelCase )
__lowercase = torch.manual_seed(0 )
__lowercase = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=lowerCamelCase ).images
__lowercase = torch.manual_seed(0 )
__lowercase = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=lowerCamelCase , return_dict=lowerCamelCase )[
0
]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowercase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _A ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = "google/ncsnpp-church-256"
__lowercase = UNetaDModel.from_pretrained(lowerCamelCase )
__lowercase = ScoreSdeVeScheduler.from_pretrained(lowerCamelCase )
__lowercase = ScoreSdeVePipeline(unet=lowerCamelCase , scheduler=lowerCamelCase )
sde_ve.to(lowerCamelCase )
sde_ve.set_progress_bar_config(disable=lowerCamelCase )
__lowercase = torch.manual_seed(0 )
__lowercase = sde_ve(num_inference_steps=10 , output_type="numpy" , generator=lowerCamelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
__lowercase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 655 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case__ : Any = logging.get_logger(__name__)
class _A ( _lowercase , _lowercase ):
'''simple docstring'''
_snake_case : Dict = """maskformer-swin"""
_snake_case : List[str] = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : List[str] , lowerCamelCase : Any=224 , lowerCamelCase : Optional[Any]=4 , lowerCamelCase : Dict=3 , lowerCamelCase : Tuple=96 , lowerCamelCase : str=[2, 2, 6, 2] , lowerCamelCase : Dict=[3, 6, 12, 24] , lowerCamelCase : Optional[Any]=7 , lowerCamelCase : Any=4.0 , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : List[str]=0.0 , lowerCamelCase : Optional[int]=0.0 , lowerCamelCase : List[str]=0.1 , lowerCamelCase : int="gelu" , lowerCamelCase : Optional[int]=False , lowerCamelCase : List[Any]=0.02 , lowerCamelCase : Tuple=1e-5 , lowerCamelCase : Dict=None , lowerCamelCase : Dict=None , **lowerCamelCase : int , ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = embed_dim
__lowercase = depths
__lowercase = len(lowerCamelCase )
__lowercase = num_heads
__lowercase = window_size
__lowercase = mlp_ratio
__lowercase = qkv_bias
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = drop_path_rate
__lowercase = hidden_act
__lowercase = use_absolute_embeddings
__lowercase = layer_norm_eps
__lowercase = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__lowercase = int(embed_dim * 2 ** (len(lowerCamelCase ) - 1) )
__lowercase = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase ) + 1 )]
__lowercase , __lowercase = get_aligned_output_features_output_indices(
out_features=lowerCamelCase , out_indices=lowerCamelCase , stage_names=self.stage_names )
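# Worked example of the channel-dimension formula above: with the defaults
# embed_dim=96 and depths=[2, 2, 6, 2] (four stages), the hidden size exposed
# for VisionEncoderDecoderModel compatibility is 96 * 2 ** (4 - 1) == 768.
assert 96 * 2 ** (len([2, 2, 6, 2]) - 1) == 768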
| 655 | 1 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
snake_case__ : str = imread(R"""digital_image_processing/image_data/lena_small.jpg""")
snake_case__ : List[Any] = cvtColor(img, COLOR_BGR2GRAY)
def snake_case_ ( ):
__lowercase = cn.convert_to_negative(_SCREAMING_SNAKE_CASE )
# assert negative_img array for at least one True
assert negative_img.any()
def snake_case_ ( ):
with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(_SCREAMING_SNAKE_CASE , 1_1_0 ) ).startswith(
"<PIL.Image.Image image mode=RGB size=100x100 at" )
def snake_case_ ( ):
__lowercase = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def snake_case_ ( ):
__lowercase = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
__lowercase = canny.canny(_SCREAMING_SNAKE_CASE )
# assert canny array for at least one True
assert canny_array.any()
def snake_case_ ( ):
assert gg.gaussian_filter(_SCREAMING_SNAKE_CASE , 5 , sigma=0.9 ).all()
def snake_case_ ( ):
# laplace diagonals
__lowercase = array([[0.2_5, 0.5, 0.2_5], [0.5, -3, 0.5], [0.2_5, 0.5, 0.2_5]] )
__lowercase = conv.img_convolve(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).astype(_SCREAMING_SNAKE_CASE )
assert res.any()
def snake_case_ ( ):
assert med.median_filter(_SCREAMING_SNAKE_CASE , 3 ).any()
def snake_case_ ( ):
__lowercase , __lowercase = sob.sobel_filter(_SCREAMING_SNAKE_CASE )
assert grad.any() and theta.any()
def snake_case_ ( ):
__lowercase = sp.make_sepia(_SCREAMING_SNAKE_CASE , 2_0 )
assert sepia.all()
def snake_case_ ( _SCREAMING_SNAKE_CASE = "digital_image_processing/image_data/lena_small.jpg" ):
__lowercase = bs.Burkes(imread(_SCREAMING_SNAKE_CASE , 1 ) , 1_2_0 )
burkes.process()
assert burkes.output_img.any()
def snake_case_ ( _SCREAMING_SNAKE_CASE = "digital_image_processing/image_data/lena_small.jpg" , ):
__lowercase = rs.NearestNeighbour(imread(_SCREAMING_SNAKE_CASE , 1 ) , 4_0_0 , 2_0_0 )
nn.process()
assert nn.output.any()
def snake_case_ ( ):
__lowercase = "digital_image_processing/image_data/lena.jpg"
# Reading the image and converting it to grayscale.
__lowercase = imread(_SCREAMING_SNAKE_CASE , 0 )
# Test for get_neighbors_pixel function() return not None
__lowercase = 0
__lowercase = 0
__lowercase = image[x_coordinate][y_coordinate]
__lowercase = lbp.get_neighbors_pixel(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
__lowercase = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
__lowercase = lbp.local_binary_value(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert lbp_image.any()
| 655 |
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# bit count represents no. of bits in the gray code
if bit_count < 0:
raise ValueError("The given input must be positive" )
# get the generated string sequence
__lowercase = gray_code_sequence_string(_SCREAMING_SNAKE_CASE )
#
# convert them to integers
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
__lowercase = int(sequence[i] , 2 )
return sequence
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__lowercase = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__lowercase = gray_code_sequence_string(bit_count - 1 )
__lowercase = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__lowercase = "0" + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__lowercase = "1" + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
return sequence
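# Worked example of the reflect-and-prefix construction above (self-contained,
# readable names; the defs in this file are obfuscated): for 2 bits, the 1-bit
# sequence ["0", "1"] becomes ["00", "01"] (prefix "0", forward order) followed
# by ["11", "10"] (prefix "1", reversed order), i.e. 0, 1, 3, 2 as integers.
def _gray_example(bit_count):
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    smaller = _gray_example(bit_count - 1)
    return ["0" + s for s in smaller] + ["1" + s for s in reversed(smaller)]
assert [int(s, 2) for s in _gray_example(2)] == [0, 1, 3, 2]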
if __name__ == "__main__":
import doctest
doctest.testmod()
| 655 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ : Optional[Any] = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Optional[int] = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
snake_case__ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 655 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ):
model.train()
__lowercase = model(_SCREAMING_SNAKE_CASE )
__lowercase = F.mse_loss(_SCREAMING_SNAKE_CASE , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(_SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
set_seed(4_2 )
__lowercase = RegressionModel()
__lowercase = deepcopy(_SCREAMING_SNAKE_CASE )
__lowercase = RegressionDataset(length=8_0 )
__lowercase = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=1_6 )
model.to(accelerator.device )
if sched:
__lowercase = AdamW(params=model.parameters() , lr=1E-3 )
__lowercase = AdamW(params=ddp_model.parameters() , lr=1E-3 )
__lowercase = LambdaLR(_SCREAMING_SNAKE_CASE , lr_lambda=lambda _SCREAMING_SNAKE_CASE : epoch**0.6_5 )
__lowercase = LambdaLR(_SCREAMING_SNAKE_CASE , lr_lambda=lambda _SCREAMING_SNAKE_CASE : epoch**0.6_5 )
# Make a copy of `model`
if sched:
__lowercase , __lowercase , __lowercase , __lowercase = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
__lowercase , __lowercase = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# Test when on a single CPU or GPU that the context manager does nothing
__lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE )
# Use a single batch
__lowercase , __lowercase = next(iter(_SCREAMING_SNAKE_CASE ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) )
__lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
# Sync grads
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )]
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# Test on distributed setup that context manager behaves properly
__lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE )
# Use a single batch
__lowercase , __lowercase = next(iter(_SCREAMING_SNAKE_CASE ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) )
__lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
# Sync grads
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )]
def snake_case_ ( _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ):
__lowercase = Accelerator(
split_batches=_SCREAMING_SNAKE_CASE , dispatch_batches=_SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE )
for iteration, batch in enumerate(_SCREAMING_SNAKE_CASE ):
__lowercase , __lowercase = batch.values()
# Gather the distributed inputs and targs for the base model
__lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) )
__lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(_SCREAMING_SNAKE_CASE ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )]
GradientState._reset_state()
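# Hypothetical helper (not used by the tests): a minimal sketch of the
# canonical training-loop pattern that test_gradient_accumulation above
# exercises. Inside accelerator.accumulate(), gradient synchronization and the
# effective optimizer step only take place on accumulation boundaries (every
# `gradient_accumulation_steps` batches, or at the end of the dataloader).
def _example_accumulation_loop(accelerator, model, optimizer, dataloader):
    for inputs, targets in dataloader:
        with accelerator.accumulate(model):
            loss = F.mse_loss(model(inputs), targets.to(inputs.device))
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()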
def snake_case_ ( _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ):
__lowercase = Accelerator(
split_batches=_SCREAMING_SNAKE_CASE , dispatch_batches=_SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for iteration, batch in enumerate(_SCREAMING_SNAKE_CASE ):
__lowercase , __lowercase = batch.values()
# Gather the distributed inputs and targs for the base model
__lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) )
__lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_SCREAMING_SNAKE_CASE )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"""
__lowercase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_SCREAMING_SNAKE_CASE ))
if accelerator.num_processes > 1:
check_model_parameters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
GradientState._reset_state()
def snake_case_ ( ):
__lowercase = Accelerator()
__lowercase = RegressionDataset(length=8_0 )
__lowercase = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=1_6 )
__lowercase = RegressionDataset(length=9_6 )
__lowercase = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=1_6 )
__lowercase , __lowercase = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(_SCREAMING_SNAKE_CASE ):
assert id(accelerator.gradient_state.active_dataloader ) == id(_SCREAMING_SNAKE_CASE )
if iteration < len(_SCREAMING_SNAKE_CASE ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(_SCREAMING_SNAKE_CASE ):
assert id(accelerator.gradient_state.active_dataloader ) == id(_SCREAMING_SNAKE_CASE )
if batch_num < len(_SCREAMING_SNAKE_CASE ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def snake_case_ ( ):
__lowercase = Accelerator()
__lowercase = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(_SCREAMING_SNAKE_CASE )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(_SCREAMING_SNAKE_CASE )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation_with_opt_and_scheduler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 655 | 1 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
snake_case__ : List[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
snake_case__ : Dict = 12_80_22
snake_case__ : Any = 12_80_28
@require_sentencepiece
class _A ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_snake_case : Optional[Any] = MaMaaaTokenizer
_snake_case : List[str] = False
_snake_case : Optional[Any] = False
_snake_case : Union[str, Any] = True
def _snake_case ( self : Dict ):
'''simple docstring'''
super().setUp()
__lowercase = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
__lowercase = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
__lowercase = Path(self.tmpdirname )
save_json(lowerCamelCase , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(lowerCamelCase , save_dir / VOCAB_FILES_NAMES["spm_file"] )
__lowercase = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self : List[Any] , **lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase )
def _snake_case ( self : List[Any] , lowerCamelCase : Any ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def _snake_case ( self : Dict ):
'''simple docstring'''
__lowercase = "</s>"
__lowercase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) , lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) , lowerCamelCase )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowercase = self.get_tokenizer()
__lowercase = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(lowerCamelCase ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def _snake_case ( self : Tuple ):
'''simple docstring'''
pass
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = self.get_tokenizer()
__lowercase = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [2, 3, 4, 5, 6] , )
__lowercase = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
__lowercase = tokenizer.convert_tokens_to_string(lowerCamelCase )
self.assertEqual(lowerCamelCase , "This is a test" )
@slow
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowercase = {"input_ids": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
'''simple docstring'''
_snake_case : int = """facebook/m2m100_418M"""
_snake_case : List[str] = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
_snake_case : Any = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
_snake_case : List[Any] = [EN_CODE, 593, 1949, 11_5781, 4, 7_1586, 4234, 6_0633, 12_6233, 432, 12_3808, 1_5592, 1197, 11_7132, 12_0618, 5, 2]
@classmethod
def _snake_case ( cls : Optional[int] ):
'''simple docstring'''
__lowercase = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
__lowercase = 1
return cls
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 128_006 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 128_022 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 128_076 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 128_063 )
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowercase = self.tokenizer.get_vocab()
self.assertEqual(len(lowerCamelCase ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , lowerCamelCase )
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowercase = "en"
__lowercase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase )
def _snake_case ( self : Any ):
'''simple docstring'''
self.assertIn(lowerCamelCase , self.tokenizer.all_special_ids )
# fmt: off
__lowercase = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2]
# fmt: on
__lowercase = self.tokenizer.decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )
__lowercase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase )
self.assertEqual(lowerCamelCase , lowerCamelCase )
self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase )
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowercase = tempfile.mkdtemp()
__lowercase = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(lowerCamelCase )
__lowercase = MaMaaaTokenizer.from_pretrained(lowerCamelCase )
self.assertDictEqual(new_tok.lang_token_to_id , lowerCamelCase )
@require_torch
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowercase = "en"
__lowercase = "fr"
__lowercase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCamelCase , return_tensors="pt" )
__lowercase = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
__lowercase = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
__lowercase = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowercase = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
__lowercase = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def _snake_case ( self : int ):
'''simple docstring'''
__lowercase = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(lowerCamelCase ) , {
# en_XX, A, test, EOS
"input_ids": [[128_022, 58, 4_183, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 128_006,
} , )
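# Hedged end-to-end sketch with the un-obfuscated names (M2M100Tokenizer plus a
# matching M2M100 generation model); the checkpoint and language codes mirror
# the fixtures above, and `model` is assumed to be loaded separately:
#
#     tokenizer = M2M100Tokenizer.from_pretrained(
#         "facebook/m2m100_418M", src_lang="en", tgt_lang="fr"
#     )
#     inputs = tokenizer("In my opinion, ...", return_tensors="pt")
#     generated = model.generate(
#         **inputs, forced_bos_token_id=tokenizer.get_lang_id("fr")
#     )
#     tokenizer.batch_decode(generated, skip_special_tokens=True)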
| 655 |
from ....utils import logging
snake_case__ : List[Any] = logging.get_logger(__name__)
class _A ( _lowercase ):
'''simple docstring'''
def __init__( self : List[str] , lowerCamelCase : Any , lowerCamelCase : Dict=None , lowerCamelCase : Dict=2_048 ):
'''simple docstring'''
__lowercase = config.__dict__
__lowercase = modal_hidden_size
if num_labels:
__lowercase = num_labels
| 655 | 1 |
from PIL import Image
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase , __lowercase = image.size
__lowercase = 0
__lowercase = image.load()
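    # First pass: sum every pixel's gray level so the global mean can be taken.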
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE ):
__lowercase = pixels[j, i]
mean += pixel
mean //= width * height
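    # Second pass: binarize each pixel against the mean (255 if brighter, else 0).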
for j in range(_SCREAMING_SNAKE_CASE ):
for i in range(_SCREAMING_SNAKE_CASE ):
__lowercase = 2_5_5 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
snake_case__ : int = mean_threshold(Image.open("""path_to_image""").convert("""L"""))
image.save("""output_image_path""")
| 655 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _A ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_snake_case : Dict = StableUnCLIPImgaImgPipeline
_snake_case : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
_snake_case : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_snake_case : int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_snake_case : int = frozenset([] )
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowercase = 32
__lowercase = embedder_hidden_size
# image encoding components
__lowercase = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
__lowercase = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCamelCase , projection_dim=lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
__lowercase = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
__lowercase = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
__lowercase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__lowercase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase , layers_per_block=1 , upcast_attention=lowerCamelCase , use_linear_projection=lowerCamelCase , )
torch.manual_seed(0 )
__lowercase = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
__lowercase = AutoencoderKL()
__lowercase = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def _snake_case ( self : List[Any] , lowerCamelCase : str , lowerCamelCase : Any=0 , lowerCamelCase : Union[str, Any]=True ):
'''simple docstring'''
if str(lowerCamelCase ).startswith("mps" ):
__lowercase = torch.manual_seed(lowerCamelCase )
else:
__lowercase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
if pil_image:
__lowercase = input_image * 0.5 + 0.5
__lowercase = input_image.clamp(0 , 1 )
__lowercase = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__lowercase = DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = "cpu" # ensure determinism for the device-dependent torch.Generator
__lowercase = self.get_dummy_components()
__lowercase = StableUnCLIPImgaImgPipeline(**lowerCamelCase )
__lowercase = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
__lowercase = self.get_dummy_inputs(lowerCamelCase )
inputs.update({"image_embeds": None} )
__lowercase = sd_pipe(**lowerCamelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowercase = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _snake_case ( self : Dict ):
'''simple docstring'''
__lowercase = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _snake_case ( self : str ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase )
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self : Any ):
'''simple docstring'''
__lowercase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__lowercase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
__lowercase = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowercase = torch.Generator(device="cpu" ).manual_seed(0 )
__lowercase = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" )
__lowercase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__lowercase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
__lowercase = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowercase = torch.Generator(device="cpu" ).manual_seed(0 )
__lowercase = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" )
__lowercase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowercase = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
__lowercase = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowercase = pipe(
lowerCamelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
__lowercase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 655 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
snake_case__ : Union[str, Any] = TypeVar("""T""")
snake_case__ : Optional[int] = TypeVar("""U""")
class _A ( Generic[T, U] ):
'''simple docstring'''
def __init__( self : Optional[int] , lowerCamelCase : T | None , lowerCamelCase : U | None ):
'''simple docstring'''
__lowercase = key
__lowercase = val
__lowercase = None
__lowercase = None
def __repr__( self : Any ):
'''simple docstring'''
return (
f"""Node: key: {self.key}, val: {self.val}, """
f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class _A ( Generic[T, U] ):
'''simple docstring'''
def __init__( self : Dict ):
'''simple docstring'''
__lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase )
__lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase )
__lowercase , __lowercase = self.rear, self.head
def __repr__( self : Optional[Any] ):
'''simple docstring'''
__lowercase = ["DoubleLinkedList"]
__lowercase = self.head
while node.next is not None:
rep.append(str(lowerCamelCase ) )
__lowercase = node.next
rep.append(str(self.rear ) )
return ",\n ".join(lowerCamelCase )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : DoubleLinkedListNode[T, U] ):
'''simple docstring'''
__lowercase = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
__lowercase = node
__lowercase = previous
__lowercase = node
__lowercase = self.rear
def _snake_case ( self : Optional[int] , lowerCamelCase : DoubleLinkedListNode[T, U] ):
'''simple docstring'''
if node.prev is None or node.next is None:
return None
__lowercase = node.next
__lowercase = node.prev
__lowercase = None
__lowercase = None
return node
class _A ( Generic[T, U] ):
'''simple docstring'''
_snake_case : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self : List[Any] , lowerCamelCase : int ):
'''simple docstring'''
__lowercase = DoubleLinkedList()
__lowercase = capacity
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = {}
def __repr__( self : Optional[Any] ):
'''simple docstring'''
return (
f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
f"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self : Dict , lowerCamelCase : T ):
'''simple docstring'''
return key in self.cache
def _snake_case ( self : List[Any] , lowerCamelCase : T ):
'''simple docstring'''
if key in self.cache:
self.hits += 1
__lowercase = self.cache[key]
__lowercase = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(lowerCamelCase )
return node.val
self.miss += 1
return None
def _snake_case ( self : Union[str, Any] , lowerCamelCase : T , lowerCamelCase : U ):
'''simple docstring'''
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
__lowercase = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(lowerCamelCase ) is not None
                ) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
__lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
__lowercase = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
__lowercase = value
self.list.add(lowerCamelCase )
@classmethod
def _snake_case ( cls : Union[str, Any] , lowerCamelCase : int = 128 ):
'''simple docstring'''
def cache_decorator_inner(lowerCamelCase : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*lowerCamelCase : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
__lowercase = LRUCache(lowerCamelCase )
__lowercase = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
__lowercase = func(*lowerCamelCase )
cls.decorator_function_to_instance_map[func].put(args[0] , lowerCamelCase )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(lowerCamelCase , "cache_info" , lowerCamelCase ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
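# Hedged usage sketch of the decorator classmethod above, assuming the
# un-obfuscated name (LRUCache.decorator); `fib` is purely illustrative:
#
#     @LRUCache.decorator(100)
#     def fib(num):
#         if num in (1, 2):
#             return 1
#         return fib(num - 1) + fib(num - 2)
#
#     fib(25)           # repeated subproblems are served from the cache
#     fib.cache_info()  # CacheInfo(hits=..., misses=..., capacity=100, ...)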
if __name__ == "__main__":
import doctest
doctest.testmod()
| 655 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _A ( _lowercase , _lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self : Optional[Any] , *,
lowerCamelCase : int = 4 , lowerCamelCase : int = 768 , lowerCamelCase : int , lowerCamelCase : Optional[int] , ):
'''simple docstring'''
super().__init__()
__lowercase = nn.Parameter(torch.zeros(lowerCamelCase ) )
# parameters for additional clip time embeddings
__lowercase = nn.Linear(lowerCamelCase , lowerCamelCase )
__lowercase = nn.Linear(lowerCamelCase , lowerCamelCase )
# parameters for encoder hidden states
__lowercase = clip_extra_context_tokens
__lowercase = nn.Linear(
lowerCamelCase , self.clip_extra_context_tokens * cross_attention_dim )
__lowercase = nn.Linear(lowerCamelCase , lowerCamelCase )
__lowercase = nn.LayerNorm(lowerCamelCase )
def _snake_case ( self : Union[str, Any] , *, lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple ):
'''simple docstring'''
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
__lowercase = image_embeddings.shape[0]
__lowercase = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
__lowercase = classifier_free_guidance_embeddings.expand(
lowerCamelCase , -1 )
__lowercase = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
__lowercase = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
__lowercase = self.embedding_proj(lowerCamelCase )
__lowercase = self.clip_image_embeddings_project_to_time_embeddings(lowerCamelCase )
__lowercase = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
__lowercase = self.clip_extra_context_tokens_proj(lowerCamelCase )
__lowercase = clip_extra_context_tokens.reshape(lowerCamelCase , -1 , self.clip_extra_context_tokens )
__lowercase = clip_extra_context_tokens.permute(0 , 2 , 1 )
__lowercase = self.encoder_hidden_states_proj(lowerCamelCase )
__lowercase = self.text_encoder_hidden_states_norm(lowerCamelCase )
__lowercase = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
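# Shape sketch (hedged, following the code above): for image_embeddings of
# shape (batch, clip_embeddings_dim), clip_extra_context_tokens_proj yields
# (batch, clip_extra_context_tokens * cross_attention_dim), which is reshaped
# and permuted to (batch, clip_extra_context_tokens, cross_attention_dim) and
# concatenated in front of the projected text encoder hidden states along the
# sequence dimension.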
| 655 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case__ : List[Any] = logging.get_logger(__name__)
snake_case__ : List[str] = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : List[Any] = """yolos"""
def __init__( self : Union[str, Any] , lowerCamelCase : Union[str, Any]=768 , lowerCamelCase : int=12 , lowerCamelCase : Union[str, Any]=12 , lowerCamelCase : Optional[Any]=3_072 , lowerCamelCase : Optional[int]="gelu" , lowerCamelCase : Dict=0.0 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : Any=0.02 , lowerCamelCase : Optional[Any]=1e-12 , lowerCamelCase : Optional[Any]=[512, 864] , lowerCamelCase : str=16 , lowerCamelCase : Dict=3 , lowerCamelCase : str=True , lowerCamelCase : List[Any]=100 , lowerCamelCase : Dict=True , lowerCamelCase : Dict=False , lowerCamelCase : List[str]=1 , lowerCamelCase : str=5 , lowerCamelCase : Any=2 , lowerCamelCase : str=5 , lowerCamelCase : Optional[int]=2 , lowerCamelCase : List[Any]=0.1 , **lowerCamelCase : List[Any] , ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = qkv_bias
__lowercase = num_detection_tokens
__lowercase = use_mid_position_embeddings
__lowercase = auxiliary_loss
# Hungarian matcher
__lowercase = class_cost
__lowercase = bbox_cost
__lowercase = giou_cost
# Loss coefficients
__lowercase = bbox_loss_coefficient
__lowercase = giou_loss_coefficient
__lowercase = eos_coefficient
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Dict = version.parse("""1.11""" )
@property
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _snake_case ( self : str ):
'''simple docstring'''
return 1e-4
@property
def _snake_case ( self : Tuple ):
'''simple docstring'''
return 12
| 655 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
snake_case__ : Union[str, Any] = TypeVar("""T""")
snake_case__ : Optional[int] = TypeVar("""U""")
class _A ( Generic[T, U] ):
'''simple docstring'''
def __init__( self : Optional[int] , lowerCamelCase : T | None , lowerCamelCase : U | None ):
'''simple docstring'''
__lowercase = key
__lowercase = val
__lowercase = None
__lowercase = None
def __repr__( self : Any ):
'''simple docstring'''
return (
f"""Node: key: {self.key}, val: {self.val}, """
f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class _A ( Generic[T, U] ):
'''simple docstring'''
def __init__( self : Dict ):
'''simple docstring'''
__lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase )
__lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase )
__lowercase , __lowercase = self.rear, self.head
def __repr__( self : Optional[Any] ):
'''simple docstring'''
__lowercase = ["DoubleLinkedList"]
__lowercase = self.head
while node.next is not None:
rep.append(str(lowerCamelCase ) )
__lowercase = node.next
rep.append(str(self.rear ) )
return ",\n ".join(lowerCamelCase )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : DoubleLinkedListNode[T, U] ):
'''simple docstring'''
__lowercase = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
__lowercase = node
__lowercase = previous
__lowercase = node
__lowercase = self.rear
def _snake_case ( self : Optional[int] , lowerCamelCase : DoubleLinkedListNode[T, U] ):
'''simple docstring'''
if node.prev is None or node.next is None:
return None
__lowercase = node.next
__lowercase = node.prev
__lowercase = None
__lowercase = None
return node
class _A ( Generic[T, U] ):
'''simple docstring'''
_snake_case : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self : List[Any] , lowerCamelCase : int ):
'''simple docstring'''
__lowercase = DoubleLinkedList()
__lowercase = capacity
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = {}
def __repr__( self : Optional[Any] ):
'''simple docstring'''
return (
f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
f"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self : Dict , lowerCamelCase : T ):
'''simple docstring'''
return key in self.cache
def _snake_case ( self : List[Any] , lowerCamelCase : T ):
'''simple docstring'''
if key in self.cache:
self.hits += 1
__lowercase = self.cache[key]
__lowercase = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(lowerCamelCase )
return node.val
self.miss += 1
return None
def _snake_case ( self : Union[str, Any] , lowerCamelCase : T , lowerCamelCase : U ):
'''simple docstring'''
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
__lowercase = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(lowerCamelCase ) is not None
) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
__lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
__lowercase = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
__lowercase = value
self.list.add(lowerCamelCase )
@classmethod
def _snake_case ( cls : Union[str, Any] , lowerCamelCase : int = 128 ):
'''simple docstring'''
def cache_decorator_inner(lowerCamelCase : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*lowerCamelCase : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
__lowercase = LRUCache(lowerCamelCase )
__lowercase = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
__lowercase = func(*lowerCamelCase )
cls.decorator_function_to_instance_map[func].put(args[0] , lowerCamelCase )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(lowerCamelCase , "cache_info" , lowerCamelCase ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
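# Usage sketch (against the upstream names this file was derived from, where
# the cache class is `LRUCache` -- as its own type annotation above says --
# and the classmethod is the decorator factory; `fib` is a hypothetical
# example function, not part of the original module):
#
# @LRUCache.decorator(100)
# def fib(num: int) -> int:
# if num in (1, 2):
# return 1
# return fib(num - 1) + fib(num - 2)
#
# fib.cache_info() # -> CacheInfo(hits=..., misses=..., capacity=100, ...)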
if __name__ == "__main__":
import doctest
doctest.testmod()
| 655 | 1 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
snake_case__ : Dict = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class _A :
'''simple docstring'''
_snake_case : List[Any] = PegasusConfig
_snake_case : Union[str, Any] = {}
_snake_case : List[Any] = """gelu"""
def __init__( self : str , lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any]=13 , lowerCamelCase : List[Any]=7 , lowerCamelCase : Optional[int]=True , lowerCamelCase : Optional[Any]=False , lowerCamelCase : Dict=99 , lowerCamelCase : Dict=32 , lowerCamelCase : Union[str, Any]=5 , lowerCamelCase : Any=4 , lowerCamelCase : int=37 , lowerCamelCase : int=0.1 , lowerCamelCase : Union[str, Any]=0.1 , lowerCamelCase : Dict=20 , lowerCamelCase : Optional[int]=2 , lowerCamelCase : str=1 , lowerCamelCase : Optional[int]=0 , ):
'''simple docstring'''
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = eos_token_id
__lowercase = pad_token_id
__lowercase = bos_token_id
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowercase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
__lowercase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
__lowercase = np.concatenate([input_ids, eos_tensor] , axis=1 )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__lowercase = prepare_pegasus_inputs_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return config, inputs_dict
def _snake_case ( self : List[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : List[Any] ):
'''simple docstring'''
__lowercase = 20
__lowercase = model_class_name(lowerCamelCase )
__lowercase = model.encode(inputs_dict["input_ids"] )
__lowercase , __lowercase = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
__lowercase = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase , lowerCamelCase )
__lowercase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
__lowercase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__lowercase = model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase , decoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , decoder_position_ids=lowerCamelCase , )
__lowercase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
__lowercase = model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase , decoder_attention_mask=lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCamelCase , )
__lowercase = model.decode(lowerCamelCase , lowerCamelCase )
__lowercase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def _snake_case ( self : Any , lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__lowercase = 20
__lowercase = model_class_name(lowerCamelCase )
__lowercase = model.encode(inputs_dict["input_ids"] )
__lowercase , __lowercase = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
__lowercase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__lowercase = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase , lowerCamelCase )
__lowercase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__lowercase = model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase , decoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , decoder_position_ids=lowerCamelCase , )
__lowercase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
__lowercase = model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCamelCase , decoder_position_ids=lowerCamelCase , )
__lowercase = model.decode(lowerCamelCase , lowerCamelCase , decoder_attention_mask=lowerCamelCase )
__lowercase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ):
if attention_mask is None:
__lowercase = np.not_equal(_SCREAMING_SNAKE_CASE , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
__lowercase = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
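# Self-contained sketch (hypothetical pad_token_id=0) of the decoder mask rule
# above: position 0 is always attended; later positions are masked wherever
# the token equals the pad id.
import numpy as np
example_dec = np.array([[2, 7, 0, 0]])
example_mask = np.concatenate(
[np.ones_like(example_dec[:, :1]), (example_dec[:, 1:] != 0).astype(np.int8)], axis=-1
)
assert example_mask.tolist() == [[1, 1, 0, 0]]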
@require_flax
class _A ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_snake_case : List[Any] = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
_snake_case : Tuple = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
_snake_case : Tuple = True
_snake_case : Any = False
_snake_case : Optional[int] = False
_snake_case : int = False
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = FlaxPegasusModelTester(self )
__lowercase = ConfigTester(self , config_class=lowerCamelCase )
def _snake_case ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowercase = self._prepare_for_class(lowerCamelCase , lowerCamelCase )
__lowercase = model_class(lowerCamelCase )
@jax.jit
def encode_jitted(lowerCamelCase : int , lowerCamelCase : Optional[Any]=None , **lowerCamelCase : int ):
return model.encode(input_ids=lowerCamelCase , attention_mask=lowerCamelCase )
with self.subTest("JIT Enabled" ):
__lowercase = encode_jitted(**lowerCamelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__lowercase = encode_jitted(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) )
for jitted_output, output in zip(lowerCamelCase , lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowercase = model_class(lowerCamelCase )
__lowercase = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
__lowercase = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Any ):
return model.decode(
decoder_input_ids=lowerCamelCase , decoder_attention_mask=lowerCamelCase , encoder_outputs=lowerCamelCase , )
with self.subTest("JIT Enabled" ):
__lowercase = decode_jitted(**lowerCamelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__lowercase = decode_jitted(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) )
for jitted_output, output in zip(lowerCamelCase , lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _snake_case ( self : Tuple ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowercase = model_class_name.from_pretrained("google/pegasus-large" , from_pt=lowerCamelCase )
__lowercase = np.ones((1, 1) )
__lowercase = model(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@slow
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowercase = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
__lowercase = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
__lowercase = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
__lowercase = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
__lowercase = tokenizer(lowerCamelCase , return_tensors="np" , truncation=lowerCamelCase , max_length=512 , padding=lowerCamelCase )
__lowercase = model.generate(**lowerCamelCase , num_beams=2 ).sequences
__lowercase = tokenizer.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )
assert tgt_text == decoded
| 655 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
snake_case__ : Optional[Any] = logging.getLogger()
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = "\n".join(_SCREAMING_SNAKE_CASE )
Path(_SCREAMING_SNAKE_CASE ).open("w" ).writelines(_SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = """patrickvonplaten/t5-tiny-random"""
snake_case__ : int = """sshleifer/bart-tiny-random"""
snake_case__ : Union[str, Any] = """sshleifer/tiny-mbart"""
snake_case__ : List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class _A ( _lowercase ):
'''simple docstring'''
def _snake_case ( self : str , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowercase = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
__lowercase = input_file_name.parent / "utest_output.txt"
assert not output_file_name.exists()
__lowercase = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
_dump_articles(lowerCamelCase , lowerCamelCase )
__lowercase = str(Path(self.get_auto_remove_tmp_dir() ) / "scores.json" )
__lowercase = "translation_en_to_de" if model == T5_TINY else "summarization"
__lowercase = f"""
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
""".split()
with patch.object(lowerCamelCase , "argv" , lowerCamelCase ):
run_generate()
assert Path(lowerCamelCase ).exists()
# os.remove(Path(output_file_name))
def _snake_case ( self : Dict ):
'''simple docstring'''
self.run_eval_tester(lowerCamelCase )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def _snake_case ( self : Optional[Any] , lowerCamelCase : str ):
'''simple docstring'''
self.run_eval_tester(lowerCamelCase )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def _snake_case ( self : Optional[Any] , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowercase = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
__lowercase = input_file_name.parent / "utest_output.txt"
assert not output_file_name.exists()
__lowercase = {
"en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
"de": [
"Maschinelles Lernen ist großartig, oder?",
"Ich esse gerne Bananen",
"Morgen ist wieder ein toller Tag!",
],
}
__lowercase = Path(self.get_auto_remove_tmp_dir() )
__lowercase = str(tmp_dir / "scores.json" )
__lowercase = str(tmp_dir / "val.target" )
_dump_articles(lowerCamelCase , text["en"] )
_dump_articles(lowerCamelCase , text["de"] )
__lowercase = "translation_en_to_de" if model == T5_TINY else "summarization"
__lowercase = f"""
run_eval_search.py
{model}
{str(lowerCamelCase )}
{str(lowerCamelCase )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
""".split()
testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"] )
with patch.object(lowerCamelCase , "argv" , lowerCamelCase ):
with CaptureStdout() as cs:
run_search()
__lowercase = [" num_beams | length_penalty", model, "Best score args"]
__lowercase = ["Info"]
if "translation" in task:
expected_strings.append("bleu" )
else:
expected_strings.extend(lowerCamelCase )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(lowerCamelCase ).exists()
os.remove(Path(lowerCamelCase ) )
| 655 | 1 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class _A :
'''simple docstring'''
def __init__( self : Any , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any]=13 , lowerCamelCase : Optional[int]=7 , lowerCamelCase : int=True , lowerCamelCase : Dict=True , lowerCamelCase : List[Any]=99 , lowerCamelCase : Any=32 , lowerCamelCase : Dict=5 , lowerCamelCase : Optional[int]=4 , lowerCamelCase : str=37 , lowerCamelCase : Any="gelu" , lowerCamelCase : Dict=0.1 , lowerCamelCase : Union[str, Any]=0.1 , lowerCamelCase : List[str]=50 , lowerCamelCase : str=0.02 , lowerCamelCase : List[str]=True , lowerCamelCase : Any=None , ):
'''simple docstring'''
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = initializer_range
__lowercase = use_labels
__lowercase = scope
def _snake_case ( self : int ):
'''simple docstring'''
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = self.get_config()
return config, input_ids, input_mask, token_labels
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , )
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowercase , __lowercase , __lowercase , __lowercase = self.prepare_config_and_inputs()
__lowercase = True
__lowercase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _snake_case ( self : Dict , lowerCamelCase : str , lowerCamelCase : Dict , lowerCamelCase : Union[str, Any] , lowerCamelCase : Any , **lowerCamelCase : Union[str, Any] , ):
'''simple docstring'''
__lowercase = BertGenerationEncoder(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__lowercase = model(lowerCamelCase , attention_mask=lowerCamelCase )
__lowercase = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : Optional[Any] , lowerCamelCase : str , lowerCamelCase : int , lowerCamelCase : List[Any] , lowerCamelCase : int , lowerCamelCase : Optional[int] , lowerCamelCase : int , **lowerCamelCase : Optional[int] , ):
'''simple docstring'''
__lowercase = True
__lowercase = BertGenerationEncoder(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__lowercase = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , )
__lowercase = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : Optional[Any] , lowerCamelCase : List[str] , lowerCamelCase : List[str] , lowerCamelCase : Optional[int] , lowerCamelCase : str , lowerCamelCase : List[str] , lowerCamelCase : Optional[int] , **lowerCamelCase : Optional[Any] , ):
'''simple docstring'''
__lowercase = True
__lowercase = True
__lowercase = BertGenerationDecoder(config=lowerCamelCase ).to(lowerCamelCase ).eval()
# first forward pass
__lowercase = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , use_cache=lowerCamelCase , )
__lowercase = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
__lowercase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowercase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
__lowercase = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowercase = torch.cat([input_mask, next_mask] , dim=-1 )
__lowercase = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0]
__lowercase = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0]
# select random slice
__lowercase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowercase = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowercase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1e-3 ) )
def _snake_case ( self : Optional[Any] , lowerCamelCase : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple , lowerCamelCase : Optional[int] , *lowerCamelCase : Tuple , ):
'''simple docstring'''
__lowercase = BertGenerationDecoder(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__lowercase = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self : Dict ):
'''simple docstring'''
__lowercase , __lowercase , __lowercase , __lowercase = self.prepare_config_and_inputs()
__lowercase = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _A ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_snake_case : Any = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
_snake_case : Dict = (BertGenerationDecoder,) if is_torch_available() else ()
_snake_case : str = (
{"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
if is_torch_available()
else {}
)
def _snake_case ( self : Any ):
'''simple docstring'''
__lowercase = BertGenerationEncoderTester(self )
__lowercase = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 )
def _snake_case ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _snake_case ( self : Dict ):
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase , __lowercase , __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs()
__lowercase = "bert"
self.model_tester.create_and_check_model(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_decoder()
__lowercase = None
self.model_tester.create_and_check_model_as_decoder(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
def _snake_case ( self : int ):
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase )
@slow
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
self.assertIsNotNone(lowerCamelCase )
@require_torch
class _A ( unittest.TestCase ):
'''simple docstring'''
@slow
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
__lowercase = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]] )
with torch.no_grad():
__lowercase = model(lowerCamelCase )[0]
__lowercase = torch.Size([1, 8, 1_024] )
self.assertEqual(output.shape , lowerCamelCase )
__lowercase = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1e-4 ) )
@require_torch
class _A ( unittest.TestCase ):
'''simple docstring'''
@slow
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowercase = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
__lowercase = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]] )
with torch.no_grad():
__lowercase = model(lowerCamelCase )[0]
__lowercase = torch.Size([1, 8, 50_358] )
self.assertEqual(output.shape , lowerCamelCase )
__lowercase = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1e-4 ) )
| 655 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class _A :
'''simple docstring'''
_snake_case : int
_snake_case : TreeNode | None = None
_snake_case : TreeNode | None = None
snake_case__ : Dict = namedtuple("""CoinsDistribResult""", """moves excess""")
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
if root is None:
return 0
# Validation
def count_nodes(_SCREAMING_SNAKE_CASE ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(_SCREAMING_SNAKE_CASE ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(_SCREAMING_SNAKE_CASE ) != count_coins(_SCREAMING_SNAKE_CASE ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(_SCREAMING_SNAKE_CASE ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
__lowercase , __lowercase = get_distrib(node.left )
__lowercase , __lowercase = get_distrib(node.right )
__lowercase = 1 - left_distrib_excess
__lowercase = 1 - right_distrib_excess
__lowercase = (
left_distrib_moves
+ right_distrib_moves
+ abs(_SCREAMING_SNAKE_CASE )
+ abs(_SCREAMING_SNAKE_CASE )
)
__lowercase = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return get_distrib(_SCREAMING_SNAKE_CASE )[0]
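# Worked example, assuming the node class here corresponds to the usual
# TreeNode(data, left, right): a three-node tree with all 3 coins at the root,
# TreeNode(3, TreeNode(0), TreeNode(0)), needs 2 moves -- one coin passed down
# to each child.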
if __name__ == "__main__":
import doctest
doctest.testmod()
| 655 | 1 |
from __future__ import annotations
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if days_between_payments <= 0:
raise ValueError("days_between_payments must be > 0" )
if daily_interest_rate < 0:
raise ValueError("daily_interest_rate must be >= 0" )
if principal <= 0:
raise ValueError("principal must be > 0" )
return principal * daily_interest_rate * days_between_payments
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
if number_of_compounding_periods <= 0:
raise ValueError("number_of_compounding_periods must be > 0" )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError("nominal_annual_interest_rate_percentage must be >= 0" )
if principal <= 0:
raise ValueError("principal must be > 0" )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
if number_of_years <= 0:
raise ValueError("number_of_years must be > 0" )
if nominal_annual_percentage_rate < 0:
raise ValueError("nominal_annual_percentage_rate must be >= 0" )
if principal <= 0:
raise ValueError("principal must be > 0" )
return compound_interest(
_SCREAMING_SNAKE_CASE , nominal_annual_percentage_rate / 3_6_5 , number_of_years * 3_6_5 )
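# Quick numeric sketch of the simple-interest formula above (hypothetical
# figures): a 1000.0 principal at a 0.0005 daily rate over 30 days accrues
# 1000.0 * 0.0005 * 30 = 15.0 in interest.
assert abs(1000.0 * 0.0005 * 30 - 15.0) < 1e-9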
if __name__ == "__main__":
import doctest
doctest.testmod()
| 655 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = SwinvaConfig()
__lowercase = swinva_name.split("_" )
__lowercase = name_split[1]
if "to" in name_split[3]:
__lowercase = int(name_split[3][-3:] )
else:
__lowercase = int(name_split[3] )
if "to" in name_split[2]:
__lowercase = int(name_split[2][-2:] )
else:
__lowercase = int(name_split[2][6:] )
if model_size == "tiny":
__lowercase = 9_6
__lowercase = (2, 2, 6, 2)
__lowercase = (3, 6, 1_2, 2_4)
elif model_size == "small":
__lowercase = 9_6
__lowercase = (2, 2, 1_8, 2)
__lowercase = (3, 6, 1_2, 2_4)
elif model_size == "base":
__lowercase = 1_2_8
__lowercase = (2, 2, 1_8, 2)
__lowercase = (4, 8, 1_6, 3_2)
else:
__lowercase = 1_9_2
__lowercase = (2, 2, 1_8, 2)
__lowercase = (6, 1_2, 2_4, 4_8)
if "to" in swinva_name:
__lowercase = (1_2, 1_2, 1_2, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
__lowercase = 2_1_8_4_1
__lowercase = "huggingface/label-files"
__lowercase = "imagenet-22k-id2label.json"
__lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
__lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
else:
__lowercase = 1_0_0_0
__lowercase = "huggingface/label-files"
__lowercase = "imagenet-1k-id2label.json"
__lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
__lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
__lowercase = img_size
__lowercase = num_classes
__lowercase = embed_dim
__lowercase = depths
__lowercase = num_heads
__lowercase = window_size
return config
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
if "patch_embed.proj" in name:
__lowercase = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
__lowercase = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
__lowercase = "encoder." + name
if "attn.proj" in name:
__lowercase = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
__lowercase = name.replace("attn" , "attention.self" )
if "norm1" in name:
__lowercase = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
__lowercase = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
__lowercase = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__lowercase = name.replace("mlp.fc2" , "output.dense" )
if "q_bias" in name:
__lowercase = name.replace("q_bias" , "query.bias" )
if "k_bias" in name:
__lowercase = name.replace("k_bias" , "key.bias" )
if "v_bias" in name:
__lowercase = name.replace("v_bias" , "value.bias" )
if "cpb_mlp" in name:
__lowercase = name.replace("cpb_mlp" , "continuous_position_bias_mlp" )
if name == "norm.weight":
__lowercase = "layernorm.weight"
if name == "norm.bias":
__lowercase = "layernorm.bias"
if "head" in name:
__lowercase = name.replace("head" , "classifier" )
else:
__lowercase = "swinv2." + name
return name
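# Sketch of the rename above on a hypothetical timm key:
# "layers.0.blocks.1.attn.proj.weight" is first prefixed to
# "encoder.layers.0.blocks.1.attn.proj.weight", then rewritten to
# "swinv2.encoder.layers.0.blocks.1.attention.output.dense.weight".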
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for key in orig_state_dict.copy().keys():
__lowercase = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
if "mask" in key:
continue
elif "qkv" in key:
__lowercase = key.split("." )
__lowercase = int(key_split[1] )
__lowercase = int(key_split[3] )
__lowercase = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__lowercase = val[:dim, :]
__lowercase = val[dim : dim * 2, :]
__lowercase = val[-dim:, :]
else:
__lowercase = val[:dim]
__lowercase = val[dim : dim * 2]
__lowercase = val[-dim:]
else:
__lowercase = val
return orig_state_dict
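# Self-contained sketch (hypothetical dim=2) of the fused-qkv split above: a
# (3*dim, dim) weight is cut into query/key/value chunks along dim 0.
import torch
example_fused = torch.arange(12.0).reshape(6, 2)
example_q, example_k, example_v = example_fused[:2, :], example_fused[2:4, :], example_fused[-2:, :]
assert torch.equal(torch.cat([example_q, example_k, example_v]), example_fused)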
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE )
timm_model.eval()
__lowercase = get_swinva_config(_SCREAMING_SNAKE_CASE )
__lowercase = SwinvaForImageClassification(_SCREAMING_SNAKE_CASE )
model.eval()
__lowercase = convert_state_dict(timm_model.state_dict() , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
__lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowercase = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_" , "-" ) ) )
__lowercase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
__lowercase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" )
__lowercase = timm_model(inputs["pixel_values"] )
__lowercase = model(**_SCREAMING_SNAKE_CASE ).logits
assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 )
print(F"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
model.push_to_hub(
repo_path_or_name=Path(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , organization="nandwalritik" , commit_message="Add model" , )
if __name__ == "__main__":
snake_case__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swinv2_name""",
default="""swinv2_tiny_patch4_window8_256""",
type=str,
help="""Name of the Swinv2 timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
snake_case__ : str = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 655 | 1 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
snake_case__ : Dict = False
snake_case__ : Dict = False
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
return TrainCommand(_SCREAMING_SNAKE_CASE )
class _A ( _lowercase ):
'''simple docstring'''
@staticmethod
def _snake_case ( lowerCamelCase : ArgumentParser ):
'''simple docstring'''
__lowercase = parser.add_parser("train" , help="CLI tool to train a model on a task." )
train_parser.add_argument(
"--train_data" , type=lowerCamelCase , required=lowerCamelCase , help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." , )
train_parser.add_argument(
"--column_label" , type=lowerCamelCase , default=0 , help="Column of the dataset csv file with example labels." )
train_parser.add_argument(
"--column_text" , type=lowerCamelCase , default=1 , help="Column of the dataset csv file with example texts." )
train_parser.add_argument(
"--column_id" , type=lowerCamelCase , default=2 , help="Column of the dataset csv file with example ids." )
train_parser.add_argument(
"--skip_first_row" , action="store_true" , help="Skip the first row of the csv file (headers)." )
train_parser.add_argument("--validation_data" , type=lowerCamelCase , default="" , help="path to validation dataset." )
train_parser.add_argument(
"--validation_split" , type=lowerCamelCase , default=0.1 , help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." , )
train_parser.add_argument("--output" , type=lowerCamelCase , default="./" , help="path to saved the trained model." )
train_parser.add_argument(
"--task" , type=lowerCamelCase , default="text_classification" , help="Task to train the model on." )
train_parser.add_argument(
"--model" , type=lowerCamelCase , default="bert-base-uncased" , help="Model's name or path to stored model." )
train_parser.add_argument("--train_batch_size" , type=lowerCamelCase , default=32 , help="Batch size for training." )
train_parser.add_argument("--valid_batch_size" , type=lowerCamelCase , default=64 , help="Batch size for validation." )
train_parser.add_argument("--learning_rate" , type=lowerCamelCase , default=3e-5 , help="Learning rate." )
train_parser.add_argument("--adam_epsilon" , type=lowerCamelCase , default=1e-08 , help="Epsilon for Adam optimizer." )
train_parser.set_defaults(func=lowerCamelCase )
def __init__( self : int , lowerCamelCase : Namespace ):
'''simple docstring'''
__lowercase = logging.get_logger("transformers-cli/training" )
__lowercase = "tf" if is_tf_available() else "torch"
os.makedirs(args.output , exist_ok=lowerCamelCase )
__lowercase = args.output
__lowercase = args.column_label
__lowercase = args.column_text
__lowercase = args.column_id
self.logger.info(f"""Loading {args.task} pipeline for {args.model}""" )
if args.task == "text_classification":
__lowercase = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"""Loading dataset from {args.train_data}""" )
__lowercase = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__lowercase = None
if args.validation_data:
self.logger.info(f"""Loading validation dataset from {args.validation_data}""" )
__lowercase = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__lowercase = args.validation_split
__lowercase = args.train_batch_size
__lowercase = args.valid_batch_size
__lowercase = args.learning_rate
__lowercase = args.adam_epsilon
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
raise NotImplementedError
def _snake_case ( self : int ):
'''simple docstring'''
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 655 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
snake_case__ : List[str] = logging.get_logger(__name__)
snake_case__ : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
snake_case__ : Optional[Any] = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
snake_case__ : List[str] = {
"""allenai/led-base-16384""": 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def snake_case_ ( ):
__lowercase = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
__lowercase = bs[:]
__lowercase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_SCREAMING_SNAKE_CASE )
cs.append(2**8 + n )
n += 1
__lowercase = [chr(_SCREAMING_SNAKE_CASE ) for n in cs]
return dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
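# Sketch of the table built above: bytes outside the printable ranges are
# remapped past 255 in order of appearance, so the space byte 32 (the 33rd
# excluded byte, after bytes 0-31) lands on chr(256 + 32) == "Ġ", the familiar
# GPT-2 space marker.
assert chr(256 + 32) == "Ġ"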
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = set()
__lowercase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowercase = char
return pairs
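# Self-contained sketch of the pair extraction above: for the symbol tuple
# ("h", "e", "l", "l", "o") the adjacent-pair set is as follows.
example_word = ("h", "e", "l", "l", "o")
example_pairs = {(example_word[i], example_word[i + 1]) for i in range(len(example_word) - 1)}
assert example_pairs == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}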
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : List[str] = VOCAB_FILES_NAMES
_snake_case : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Union[str, Any] = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , lowerCamelCase : Tuple , lowerCamelCase : Tuple , lowerCamelCase : Optional[int]="replace" , lowerCamelCase : Dict="<s>" , lowerCamelCase : Dict="</s>" , lowerCamelCase : Optional[Any]="</s>" , lowerCamelCase : Any="<s>" , lowerCamelCase : List[str]="<unk>" , lowerCamelCase : Union[str, Any]="<pad>" , lowerCamelCase : Any="<mask>" , lowerCamelCase : str=False , **lowerCamelCase : Optional[Any] , ):
'''simple docstring'''
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else bos_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else eos_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else sep_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else cls_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else unk_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else pad_token
# Mask token behaves like a normal word, i.e. includes the space before it
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
super().__init__(
errors=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , )
with open(lowerCamelCase , encoding="utf-8" ) as vocab_handle:
__lowercase = json.load(lowerCamelCase )
__lowercase = {v: k for k, v in self.encoder.items()}
__lowercase = errors # how to handle errors in decoding
__lowercase = bytes_to_unicode()
__lowercase = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase , encoding="utf-8" ) as merges_handle:
__lowercase = merges_handle.read().split("\n" )[1:-1]
__lowercase = [tuple(merge.split() ) for merge in bpe_merges]
__lowercase = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
__lowercase = {}
__lowercase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__lowercase = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
return len(self.encoder )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self : List[Any] , lowerCamelCase : str ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
__lowercase = tuple(lowerCamelCase )
__lowercase = get_pairs(lowerCamelCase )
if not pairs:
return token
while True:
__lowercase = min(lowerCamelCase , key=lambda lowerCamelCase : self.bpe_ranks.get(lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__lowercase , __lowercase = bigram
__lowercase = []
__lowercase = 0
while i < len(lowerCamelCase ):
try:
__lowercase = word.index(lowerCamelCase , lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowercase = j
if word[i] == first and i < len(lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowercase = tuple(lowerCamelCase )
__lowercase = new_word
if len(lowerCamelCase ) == 1:
break
else:
__lowercase = get_pairs(lowerCamelCase )
__lowercase = " ".join(lowerCamelCase )
__lowercase = word
return word
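# Sketch of one merge step in the method above (hypothetical ranks): if
# ("l", "o") is the best-ranked pair, the word ("h", "e", "l", "l", "o")
# becomes ("h", "e", "l", "lo"); merging repeats until no ranked pair remains.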
def _snake_case ( self : List[Any] , lowerCamelCase : Tuple ):
'''simple docstring'''
__lowercase = []
for token in re.findall(self.pat , lowerCamelCase ):
__lowercase = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase ).split(" " ) )
return bpe_tokens
def _snake_case ( self : Dict , lowerCamelCase : Optional[int] ):
'''simple docstring'''
return self.encoder.get(lowerCamelCase , self.encoder.get(self.unk_token ) )
def _snake_case ( self : str , lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return self.decoder.get(lowerCamelCase )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : int ):
'''simple docstring'''
__lowercase = "".join(lowerCamelCase )
__lowercase = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _snake_case ( self : Optional[Any] , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase = os.path.join(
lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__lowercase = os.path.join(
lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase , ensure_ascii=lowerCamelCase ) + "\n" )
__lowercase = 0
with open(lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
__lowercase = token_index
writer.write(" ".join(lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def _snake_case ( self : Tuple , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowercase = [self.cls_token_id]
__lowercase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self : str , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase )) + [1]
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]
def _snake_case ( self : int , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self : Dict , lowerCamelCase : Any , lowerCamelCase : Tuple=False , **lowerCamelCase : Any ):
'''simple docstring'''
__lowercase = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase ) > 0 and not text[0].isspace()):
__lowercase = " " + text
return (text, kwargs)
def _snake_case ( self : List[Any] , lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase : Optional[int] = None , lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , ):
'''simple docstring'''
__lowercase = super()._pad(
encoded_inputs=lowerCamelCase , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
__lowercase = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__lowercase = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__lowercase = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase )
if needs_to_be_padded:
__lowercase = len(lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__lowercase = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
__lowercase = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
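# --- Hedged sketch (not part of the tokenizer above) --------------------------
# Minimal standalone illustration of the `global_attention_mask` padding rule in
# `_pad`: padded positions get `-1`, because `0` already means "local attention"
# in this mask. The helper name and target length are assumptions for the demo.
def _pad_global_attention_mask_sketch(mask, target_len, padding_side="right"):
    difference = target_len - len(mask)
    if padding_side == "right":
        return mask + [-1] * difference
    if padding_side == "left":
        return [-1] * difference + mask
    raise ValueError("Invalid padding strategy:" + str(padding_side))

assert _pad_global_attention_mask_sketch([1, 0, 0], 5) == [1, 0, 0, -1, -1]
assert _pad_global_attention_mask_sketch([1, 0], 4, "left") == [-1, -1, 1, 0]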
| 655 | 1 |
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class _A ( _lowercase ):
'''simple docstring'''
def __init__( self : List[Any] , lowerCamelCase : Dict="" , lowerCamelCase : int="train" ):
'''simple docstring'''
assert os.path.isdir(lowerCamelCase )
__lowercase = []
__lowercase = os.listdir(lowerCamelCase )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
if not os.path.isfile(lowerCamelCase ):
continue
self.documents.append(lowerCamelCase )
def __len__( self : Dict ):
'''simple docstring'''
return len(self.documents )
def __getitem__( self : int , lowerCamelCase : Tuple ):
'''simple docstring'''
__lowercase = self.documents[idx]
__lowercase = document_path.split("/" )[-1]
with open(lowerCamelCase , encoding="utf-8" ) as source:
__lowercase = source.read()
__lowercase , __lowercase = process_story(lowerCamelCase )
return document_name, story_lines, summary_lines
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = list(filter(lambda _SCREAMING_SNAKE_CASE : len(_SCREAMING_SNAKE_CASE ) != 0 , [line.strip() for line in raw_story.split("\n" )] ) )
# for some unknown reason some lines miss a period, add it
__lowercase = [_add_missing_period(_SCREAMING_SNAKE_CASE ) for line in nonempty_lines]
# gather article lines
__lowercase = []
__lowercase = deque(_SCREAMING_SNAKE_CASE )
while True:
try:
__lowercase = lines.popleft()
if element.startswith("@highlight" ):
break
story_lines.append(_SCREAMING_SNAKE_CASE )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
    __lowercase = list(filter(lambda t : not t.startswith("@highlight" ) , _SCREAMING_SNAKE_CASE ) )
return story_lines, summary_lines
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = [".", "!", "?", "...", "'", "`", "\"", "\u2019", "\u2019", ")"]
if line.startswith("@highlight" ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if len(_SCREAMING_SNAKE_CASE ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(_SCREAMING_SNAKE_CASE )) )
return sequence
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = torch.ones_like(_SCREAMING_SNAKE_CASE )
__lowercase = sequence == pad_token_id
__lowercase = 0
return mask
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = [tokenizer.encode(_SCREAMING_SNAKE_CASE ) for line in story_lines]
__lowercase = [token for sentence in story_lines_token_ids for token in sentence]
__lowercase = [tokenizer.encode(_SCREAMING_SNAKE_CASE ) for line in summary_lines]
__lowercase = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = []
for sequence in batch:
__lowercase = -1
__lowercase = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(_SCREAMING_SNAKE_CASE )
return torch.tensor(_SCREAMING_SNAKE_CASE )
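# --- Hedged sketch (illustrative only) ----------------------------------------
# Standalone restatement of the truncate-or-pad and padding-mask helpers above;
# the de-obfuscated names `fit_to_block_size` and `build_mask` are assumptions
# inferred from the behaviour. Shows the intended interaction on toy token ids.
def _fit_to_block_size_sketch(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))

_ids = torch.tensor(_fit_to_block_size_sketch([5, 6, 7], block_size=5, pad_token_id=0))
_mask = torch.ones_like(_ids)
_mask[_ids == 0] = 0  # zero out padded positions, as in the mask builder above
assert _mask.tolist() == [1, 1, 1, 0, 0]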
| 655 |
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError("The length of profit and weight must be same." )
if max_weight <= 0:
raise ValueError("max_weight must greater than zero." )
if any(p < 0 for p in profit ):
raise ValueError("Profit can not be negative." )
if any(w < 0 for w in weight ):
raise ValueError("Weight can not be negative." )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
__lowercase = [p / w for p, w in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]
# Creating a copy of the list and sorting profit/weight in ascending order
__lowercase = sorted(_SCREAMING_SNAKE_CASE )
# declaring useful variables
__lowercase = len(_SCREAMING_SNAKE_CASE )
__lowercase = 0
__lowercase = 0
__lowercase = 0
    # loop while the total weight has not reached the max limit (e.g. 15 kg) and i < length
while limit <= max_weight and i < length:
        # pick the largest remaining profit/weight ratio, locate its item, and
        # mark the ratio as used with the flag value -1
__lowercase = sorted_profit_by_weight[length - i - 1]
__lowercase = profit_by_weight.index(_SCREAMING_SNAKE_CASE )
__lowercase = -1
        # check whether the whole item fits in the remaining capacity
if max_weight - limit >= weight[index]:
limit += weight[index]
            # The whole item fits, so add its full profit
            # (fraction taken is weight[index] / weight[index] == 1).
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
snake_case__ : str = [int(x) for x in input("""Input profits separated by spaces: """).split()]
snake_case__ : str = [int(x) for x in input("""Input weights separated by spaces: """).split()]
snake_case__ : Optional[Any] = int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight)
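    # Hedged worked example (non-interactive, illustrative values): with
    # profit=[10, 20, 30], weight=[1, 3, 5] and max_weight=7, the profit/weight
    # ratios are 10, ~6.67 and 6, so the greedy pass takes items 0 and 1 whole
    # plus 3/5 of item 2:
    #   calc_profit([10, 20, 30], [1, 3, 5], 7) == 10 + 20 + (3 / 5) * 30 == 48.0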
| 655 | 1 |
import argparse
from collections import defaultdict
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = F"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(_SCREAMING_SNAKE_CASE , "r" ) as f:
__lowercase = f.readlines()
__lowercase = F"""class {class_name}("""
__lowercase = F"""{4 * ' '}def {test_name}("""
__lowercase = F"""{8 * ' '}{correct_line.split()[0]}"""
__lowercase = F"""{1_6 * ' '}{correct_line.split()[0]}"""
__lowercase = False
__lowercase = False
__lowercase = False
__lowercase = False
__lowercase = 0
__lowercase = 0
__lowercase = []
for line in lines:
if line.startswith(_SCREAMING_SNAKE_CASE ):
__lowercase = True
elif in_class and line.startswith(_SCREAMING_SNAKE_CASE ):
__lowercase = True
elif in_class and in_func and (line.startswith(_SCREAMING_SNAKE_CASE ) or line.startswith(_SCREAMING_SNAKE_CASE )):
__lowercase = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
__lowercase = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
__lowercase = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"""{spaces * ' '}{correct_line}""" )
__lowercase = __lowercase = __lowercase = __lowercase = False
else:
new_lines.append(_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE , "w" ) as f:
for line in new_lines:
f.write(_SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ):
if fail is not None:
with open(_SCREAMING_SNAKE_CASE , "r" ) as f:
__lowercase = {l.strip() for l in f.readlines()}
else:
__lowercase = None
with open(_SCREAMING_SNAKE_CASE , "r" ) as f:
__lowercase = f.readlines()
__lowercase = defaultdict(_SCREAMING_SNAKE_CASE )
for line in correct_lines:
__lowercase , __lowercase , __lowercase , __lowercase = line.split(";" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
snake_case__ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""")
parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None)
snake_case__ : Optional[int] = parser.parse_args()
main(args.correct_filename, args.fail_filename)
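    # Expected input formats, reconstructed from the parsing above (the concrete
    # path and names are illustrative only): each line of --correct_filename has
    # four ";"-separated fields,
    #
    #   <test file>;<class name>;<test name>;<corrected line>
    #
    # e.g.
    #   tests/models/bert/test_modeling_bert.py;BertModelTest;test_inference;expected_slice = torch.tensor([0.1, 0.2, 0.3])
    #
    # and --fail_filename, when given, lists failing tests one per line as
    #
    #   <test file>::<class name>::<test name>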
| 655 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Dict = """openai/whisper-base"""
_snake_case : Union[str, Any] = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
_snake_case : Any = """transcriber"""
_snake_case : Any = WhisperProcessor
_snake_case : Optional[int] = WhisperForConditionalGeneration
_snake_case : str = ["""audio"""]
_snake_case : Optional[int] = ["""text"""]
def _snake_case ( self : List[str] , lowerCamelCase : Optional[int] ):
'''simple docstring'''
return self.pre_processor(lowerCamelCase , return_tensors="pt" ).input_features
def _snake_case ( self : str , lowerCamelCase : List[Any] ):
'''simple docstring'''
return self.model.generate(inputs=lowerCamelCase )
def _snake_case ( self : List[str] , lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return self.pre_processor.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )[0]
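# --- Hedged usage sketch --------------------------------------------------------
# The tool class name is obfuscated to `_A` above; `SpeechToTextTool` is its
# assumed upstream name. `audio` is assumed to be a raw waveform accepted by
# WhisperProcessor (e.g. a 1-D float array sampled at 16 kHz):
#
#   tool = SpeechToTextTool()
#   text = tool(audio)  # runs encode -> forward (generate) -> decode, as above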
| 655 | 1 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class _A :
'''simple docstring'''
def __init__( self : Dict , lowerCamelCase : Any , ):
'''simple docstring'''
__lowercase = parent
__lowercase = 13
__lowercase = 7
__lowercase = True
__lowercase = True
__lowercase = True
__lowercase = 99
__lowercase = 32
__lowercase = 2
__lowercase = 4
__lowercase = 37
__lowercase = "gelu"
__lowercase = 0.1
__lowercase = 0.1
__lowercase = 512
__lowercase = 16
__lowercase = 2
__lowercase = 0.02
__lowercase = 3
__lowercase = 4
__lowercase = None
def _snake_case ( self : int ):
'''simple docstring'''
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self : Tuple ):
'''simple docstring'''
        __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = self.prepare_config_and_inputs()
__lowercase = True
__lowercase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _snake_case ( self : Union[str, Any] , lowerCamelCase : int , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any] , lowerCamelCase : str , lowerCamelCase : str ):
'''simple docstring'''
__lowercase = TFEsmModel(config=lowerCamelCase )
__lowercase = {"input_ids": input_ids, "attention_mask": input_mask}
__lowercase = model(lowerCamelCase )
__lowercase = [input_ids, input_mask]
__lowercase = model(lowerCamelCase )
__lowercase = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : Optional[Any] , lowerCamelCase : List[str] , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : int , lowerCamelCase : Any , ):
'''simple docstring'''
__lowercase = True
__lowercase = TFEsmModel(config=lowerCamelCase )
__lowercase = {
"input_ids": input_ids,
"attention_mask": input_mask,
"encoder_hidden_states": encoder_hidden_states,
"encoder_attention_mask": encoder_attention_mask,
}
__lowercase = model(lowerCamelCase )
__lowercase = [input_ids, input_mask]
__lowercase = model(lowerCamelCase , encoder_hidden_states=lowerCamelCase )
# Also check the case where encoder outputs are not passed
__lowercase = model(lowerCamelCase , attention_mask=lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : Dict , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any] , lowerCamelCase : Dict , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[str] ):
'''simple docstring'''
__lowercase = TFEsmForMaskedLM(config=lowerCamelCase )
__lowercase = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self : Dict , lowerCamelCase : Dict , lowerCamelCase : str , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : int ):
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = TFEsmForTokenClassification(config=lowerCamelCase )
__lowercase = {"input_ids": input_ids, "attention_mask": input_mask}
__lowercase = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
        __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _A ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_snake_case : Dict = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
_snake_case : str = (
{
"""feature-extraction""": TFEsmModel,
"""fill-mask""": TFEsmForMaskedLM,
"""text-classification""": TFEsmForSequenceClassification,
"""token-classification""": TFEsmForTokenClassification,
"""zero-shot""": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : Union[str, Any] = False
_snake_case : List[Any] = False
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowercase = TFEsmModelTester(self )
__lowercase = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 )
def _snake_case ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def _snake_case ( self : Dict ):
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase )
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase )
@slow
def _snake_case ( self : int ):
'''simple docstring'''
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = TFEsmModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@unittest.skip("Protein models do not support embedding resizing." )
def _snake_case ( self : int ):
'''simple docstring'''
pass
@unittest.skip("Protein models do not support embedding resizing." )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
pass
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowerCamelCase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
__lowercase = model.get_bias()
assert isinstance(lowerCamelCase , lowerCamelCase )
for k, v in name.items():
assert isinstance(lowerCamelCase , tf.Variable )
else:
__lowercase = model.get_output_embeddings()
assert x is None
__lowercase = model.get_bias()
assert name is None
@require_tf
class _A ( unittest.TestCase ):
'''simple docstring'''
@slow
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowercase = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
__lowercase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowercase = model(lowerCamelCase )[0]
__lowercase = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , lowerCamelCase )
# compare the actual values for a slice.
__lowercase = tf.constant(
[
[
[8.92_1518, -10.58_9814, -6.467_1307],
[-6.396_7156, -13.91_1377, -1.121_1915],
[-7.78_1247, -13.95_1557, -3.74_0592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
__lowercase = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
__lowercase = model(lowerCamelCase )[0]
# compare the actual values for a slice.
__lowercase = tf.constant(
[
[
[0.1444_3092, 0.5412_5327, 0.324_7739],
[0.3034_0484, 0.0052_6676, 0.3107_7722],
[0.3227_8043, -0.2498_7096, 0.341_4628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 655 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class _A :
'''simple docstring'''
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
__lowercase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
__lowercase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _snake_case ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
__lowercase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
__lowercase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
__lowercase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = inputs["prompt"]
__lowercase = inputs["generator"]
__lowercase = inputs["num_inference_steps"]
__lowercase = inputs["output_type"]
if "image" in inputs:
__lowercase = inputs["image"]
else:
__lowercase = None
if "mask_image" in inputs:
__lowercase = inputs["mask_image"]
else:
__lowercase = None
if "original_image" in inputs:
__lowercase = inputs["original_image"]
else:
__lowercase = None
__lowercase , __lowercase = pipe.encode_prompt(lowerCamelCase )
# inputs with prompt converted to embeddings
__lowercase = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
__lowercase = image
if mask_image is not None:
__lowercase = mask_image
if original_image is not None:
__lowercase = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(lowerCamelCase , lowerCamelCase , lowerCamelCase )
__lowercase = pipe(**lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCamelCase )
__lowercase = self.pipeline_class.from_pretrained(lowerCamelCase )
pipe_loaded.to(lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=lowerCamelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowerCamelCase , lowerCamelCase ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , )
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = inputs["generator"]
__lowercase = inputs["num_inference_steps"]
__lowercase = inputs["output_type"]
# inputs with prompt converted to embeddings
__lowercase = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
__lowercase = image
if mask_image is not None:
__lowercase = mask_image
if original_image is not None:
__lowercase = original_image
__lowercase = pipe_loaded(**lowerCamelCase )[0]
__lowercase = np.abs(to_np(lowerCamelCase ) - to_np(lowerCamelCase ) ).max()
self.assertLess(lowerCamelCase , 1e-4 )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = pipe(**lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCamelCase )
__lowercase = self.pipeline_class.from_pretrained(lowerCamelCase )
pipe_loaded.to(lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=lowerCamelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = pipe_loaded(**lowerCamelCase )[0]
__lowercase = np.abs(to_np(lowerCamelCase ) - to_np(lowerCamelCase ) ).max()
self.assertLess(lowerCamelCase , 1e-4 )
| 655 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
snake_case__ : str = {
"""configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""],
"""configuration_data2vec_text""": [
"""DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecTextConfig""",
"""Data2VecTextOnnxConfig""",
],
"""configuration_data2vec_vision""": [
"""DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecVisionConfig""",
"""Data2VecVisionOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Optional[Any] = [
"""DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecAudioForAudioFrameClassification""",
"""Data2VecAudioForCTC""",
"""Data2VecAudioForSequenceClassification""",
"""Data2VecAudioForXVector""",
"""Data2VecAudioModel""",
"""Data2VecAudioPreTrainedModel""",
]
snake_case__ : Dict = [
"""DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecTextForCausalLM""",
"""Data2VecTextForMaskedLM""",
"""Data2VecTextForMultipleChoice""",
"""Data2VecTextForQuestionAnswering""",
"""Data2VecTextForSequenceClassification""",
"""Data2VecTextForTokenClassification""",
"""Data2VecTextModel""",
"""Data2VecTextPreTrainedModel""",
]
snake_case__ : Dict = [
"""DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecVisionForImageClassification""",
"""Data2VecVisionForMaskedImageModeling""",
"""Data2VecVisionForSemanticSegmentation""",
"""Data2VecVisionModel""",
"""Data2VecVisionPreTrainedModel""",
]
if is_tf_available():
snake_case__ : List[str] = [
"""TFData2VecVisionForImageClassification""",
"""TFData2VecVisionForSemanticSegmentation""",
"""TFData2VecVisionModel""",
"""TFData2VecVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
snake_case__ : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
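# --- Hedged sketch of the lazy-import pattern used above ------------------------
# Toy stand-in for `_LazyModule` (not its real API): attribute access triggers
# the underlying import, so importing the package stays cheap until a symbol is
# actually touched. The module/name mapping below is illustrative only.
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for module_name, names in self._import_structure.items():
            if attr in names:
                # the real import happens only on first access
                return getattr(importlib.import_module(module_name), attr)
        raise AttributeError(attr)

_toy = _TinyLazyModule("toy", {"json": ["dumps"]})
assert _toy.dumps({"a": 1}) == '{"a": 1}'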
| 655 |
import numpy as np
snake_case__ : Tuple = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
class _A :
'''simple docstring'''
def __init__( self : Dict ):
'''simple docstring'''
        __lowercase = np.array(snake_case__ )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : str ):
'''simple docstring'''
__lowercase , __lowercase = np.where(letter == self.SQUARE )
__lowercase = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def _snake_case ( self : List[Any] , lowerCamelCase : int , lowerCamelCase : int ):
'''simple docstring'''
__lowercase = self.SQUARE[indexa - 1, indexa - 1]
return letter
def _snake_case ( self : int , lowerCamelCase : str ):
'''simple docstring'''
__lowercase = message.lower()
__lowercase = message.replace(" " , "" )
__lowercase = message.replace("j" , "i" )
__lowercase = np.empty((2, len(lowerCamelCase )) )
for letter_index in range(len(lowerCamelCase ) ):
__lowercase = self.letter_to_numbers(message[letter_index] )
__lowercase = numbers[0]
__lowercase = numbers[1]
__lowercase = first_step.reshape(2 * len(lowerCamelCase ) )
__lowercase = ""
for numbers_index in range(len(lowerCamelCase ) ):
__lowercase = int(second_step[numbers_index * 2] )
__lowercase = int(second_step[(numbers_index * 2) + 1] )
__lowercase = self.numbers_to_letter(lowerCamelCase , lowerCamelCase )
__lowercase = encoded_message + letter
return encoded_message
def _snake_case ( self : Optional[Any] , lowerCamelCase : str ):
'''simple docstring'''
__lowercase = message.lower()
        __lowercase = message.replace(" " , "" )
__lowercase = np.empty(2 * len(lowerCamelCase ) )
for letter_index in range(len(lowerCamelCase ) ):
__lowercase = self.letter_to_numbers(message[letter_index] )
__lowercase = numbers[0]
__lowercase = numbers[1]
__lowercase = first_step.reshape((2, len(lowerCamelCase )) )
__lowercase = ""
for numbers_index in range(len(lowerCamelCase ) ):
__lowercase = int(second_step[0, numbers_index] )
__lowercase = int(second_step[1, numbers_index] )
__lowercase = self.numbers_to_letter(lowerCamelCase , lowerCamelCase )
__lowercase = decoded_message + letter
return decoded_message
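# Hedged round-trip example (method names are obfuscated above; upstream this
# is the Bifid cipher with `encode`/`decode` methods). "j" is folded into "i"
# and spaces are stripped before encoding, so pick a message without either:
#
#   cipher = BifidCipher()
#   assert cipher.decode(cipher.encode("testmessage")) == "testmessage"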
| 655 | 1 |
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
print("\nThe shortest path matrix using Floyd Warshall algorithm\n" )
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE ):
if dist[i][j] != float("inf" ):
print(int(dist[i][j] ) , end="\t" )
else:
print("INF" , end="\t" )
print()
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = [[float("inf" ) for _ in range(_SCREAMING_SNAKE_CASE )] for _ in range(_SCREAMING_SNAKE_CASE )]
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE ):
__lowercase = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(_SCREAMING_SNAKE_CASE ):
# looping through rows of graph array
for i in range(_SCREAMING_SNAKE_CASE ):
# looping through columns of graph array
for j in range(_SCREAMING_SNAKE_CASE ):
if (
dist[i][k] != float("inf" )
and dist[k][j] != float("inf" )
and dist[i][k] + dist[k][j] < dist[i][j]
):
__lowercase = dist[i][k] + dist[k][j]
_print_dist(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return dist, v
if __name__ == "__main__":
snake_case__ : Dict = int(input("""Enter number of vertices: """))
snake_case__ : str = int(input("""Enter number of edges: """))
snake_case__ : str = [[float("""inf""") for i in range(v)] for j in range(v)]
for i in range(v):
snake_case__ : Optional[int] = 0.0
    # src and dst are 0-based vertex indices and must be smaller than v;
    # failure to follow this will result in an IndexError
for i in range(e):
print("""\nEdge """, i + 1)
snake_case__ : Union[str, Any] = int(input("""Enter source:"""))
snake_case__ : Optional[Any] = int(input("""Enter destination:"""))
snake_case__ : int = float(input("""Enter weight:"""))
snake_case__ : Dict = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
    # # Expected output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
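# Programmatic version of the worked example above (hedged; uses the
# de-obfuscated name `floyd_warshall` from the __main__ block):
#
#   INF = float("inf")
#   example_graph = [
#       [0.0, INF, INF],
#       [INF, 0.0, 2.0],
#       [INF, 1.0, 0.0],
#   ]
#   floyd_warshall(example_graph, 3)  # prints the 0/INF matrix shown above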
| 655 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class _A ( ctypes.Structure ):
'''simple docstring'''
_snake_case : Optional[Any] = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)]
def snake_case_ ( ):
if os.name == "nt":
__lowercase = CursorInfo()
__lowercase = ctypes.windll.kernelaa.GetStdHandle(-1_1 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) )
__lowercase = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) )
elif os.name == "posix":
sys.stdout.write("\033[?25l" )
sys.stdout.flush()
def snake_case_ ( ):
if os.name == "nt":
__lowercase = CursorInfo()
__lowercase = ctypes.windll.kernelaa.GetStdHandle(-1_1 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) )
__lowercase = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) )
elif os.name == "posix":
sys.stdout.write("\033[?25h" )
sys.stdout.flush()
@contextmanager
def snake_case_ ( ):
try:
hide_cursor()
yield
finally:
show_cursor()
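# --- Hedged usage sketch --------------------------------------------------------
# `hide_cursor`/`show_cursor` match the calls inside the context manager above;
# `hidden_cursor` is an assumed name for the decorated function itself:
#
#   with hidden_cursor():
#       render_progress()  # cursor is hidden, and restored even on exceptions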
| 655 | 1 |
def snake_case_ ( _SCREAMING_SNAKE_CASE = 1_0_0_0_0_0_0 ):
__lowercase = set(range(3 , _SCREAMING_SNAKE_CASE , 2 ) )
primes.add(2 )
for p in range(3 , _SCREAMING_SNAKE_CASE , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) )
__lowercase = [float(_SCREAMING_SNAKE_CASE ) for n in range(limit + 1 )]
for p in primes:
for n in range(_SCREAMING_SNAKE_CASE , limit + 1 , _SCREAMING_SNAKE_CASE ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
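    # --- Hedged sanity check (illustrative only) --------------------------------
    # For tiny limits, the multiplicative sieve above can be cross-checked
    # against a brute-force Euler totient via gcd. Project Euler 72's stated
    # example: there are 21 reduced proper fractions with denominator <= 8.
    from math import gcd

    def _phi_bruteforce(n):
        return sum(1 for k in range(1, n + 1) if gcd(n, k) == 1)

    assert sum(_phi_bruteforce(n) for n in range(2, 9)) == 21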
| 655 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case__ : List[Any] = logging.get_logger(__name__)
snake_case__ : List[str] = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : List[Any] = """yolos"""
def __init__( self : Union[str, Any] , lowerCamelCase : Union[str, Any]=768 , lowerCamelCase : int=12 , lowerCamelCase : Union[str, Any]=12 , lowerCamelCase : Optional[Any]=3_072 , lowerCamelCase : Optional[int]="gelu" , lowerCamelCase : Dict=0.0 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : Any=0.02 , lowerCamelCase : Optional[Any]=1e-12 , lowerCamelCase : Optional[Any]=[512, 864] , lowerCamelCase : str=16 , lowerCamelCase : Dict=3 , lowerCamelCase : str=True , lowerCamelCase : List[Any]=100 , lowerCamelCase : Dict=True , lowerCamelCase : Dict=False , lowerCamelCase : List[str]=1 , lowerCamelCase : str=5 , lowerCamelCase : Any=2 , lowerCamelCase : str=5 , lowerCamelCase : Optional[int]=2 , lowerCamelCase : List[Any]=0.1 , **lowerCamelCase : List[Any] , ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = qkv_bias
__lowercase = num_detection_tokens
__lowercase = use_mid_position_embeddings
__lowercase = auxiliary_loss
# Hungarian matcher
__lowercase = class_cost
__lowercase = bbox_cost
__lowercase = giou_cost
# Loss coefficients
__lowercase = bbox_loss_coefficient
__lowercase = giou_loss_coefficient
__lowercase = eos_coefficient
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Dict = version.parse("""1.11""" )
@property
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _snake_case ( self : str ):
'''simple docstring'''
return 1e-4
@property
def _snake_case ( self : Tuple ):
'''simple docstring'''
return 12
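# --- Hedged usage sketch --------------------------------------------------------
# The class names are obfuscated to `_A` above; upstream they are `YolosConfig`
# and `YolosOnnxConfig`:
#
#   config = YolosConfig()
#   onnx_config = YolosOnnxConfig(config)
#   list(onnx_config.inputs)         # ["pixel_values"], with dynamic axes
#   onnx_config.atol_for_validation  # 1e-4
#   onnx_config.default_onnx_opset   # 12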
| 655 | 1 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : Tuple = logging.get_logger(__name__)
snake_case__ : Dict = {
"""huggingface/autoformer-tourism-monthly""": """https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json""",
}
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : List[Any] = """autoformer"""
_snake_case : Optional[Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self : Optional[Any] , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[int] = None , lowerCamelCase : str = "student_t" , lowerCamelCase : str = "nll" , lowerCamelCase : int = 1 , lowerCamelCase : List[int] = [1, 2, 3, 4, 5, 6, 7] , lowerCamelCase : bool = True , lowerCamelCase : int = 0 , lowerCamelCase : int = 0 , lowerCamelCase : int = 0 , lowerCamelCase : int = 0 , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : int = 64 , lowerCamelCase : int = 2 , lowerCamelCase : int = 2 , lowerCamelCase : int = 2 , lowerCamelCase : int = 2 , lowerCamelCase : int = 32 , lowerCamelCase : int = 32 , lowerCamelCase : str = "gelu" , lowerCamelCase : float = 0.1 , lowerCamelCase : float = 0.1 , lowerCamelCase : float = 0.1 , lowerCamelCase : float = 0.1 , lowerCamelCase : float = 0.1 , lowerCamelCase : int = 100 , lowerCamelCase : float = 0.02 , lowerCamelCase : bool = True , lowerCamelCase : List[Any]=True , lowerCamelCase : int = 10 , lowerCamelCase : int = 25 , lowerCamelCase : int = 3 , **lowerCamelCase : Union[str, Any] , ):
'''simple docstring'''
__lowercase = prediction_length
__lowercase = context_length if context_length is not None else prediction_length
__lowercase = distribution_output
__lowercase = loss
__lowercase = input_size
__lowercase = num_time_features
__lowercase = lags_sequence
__lowercase = scaling
__lowercase = num_dynamic_real_features
__lowercase = num_static_real_features
__lowercase = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowerCamelCase ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
__lowercase = cardinality
else:
__lowercase = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowerCamelCase ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
__lowercase = embedding_dimension
else:
__lowercase = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__lowercase = num_parallel_samples
# Transformer architecture configuration
__lowercase = input_size * len(self.lags_sequence ) + self._number_of_features
__lowercase = d_model
__lowercase = encoder_attention_heads
__lowercase = decoder_attention_heads
__lowercase = encoder_ffn_dim
__lowercase = decoder_ffn_dim
__lowercase = encoder_layers
__lowercase = decoder_layers
__lowercase = dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = encoder_layerdrop
__lowercase = decoder_layerdrop
__lowercase = activation_function
__lowercase = init_std
__lowercase = use_cache
# Autoformer
__lowercase = label_length
__lowercase = moving_average
__lowercase = autocorrelation_factor
super().__init__(is_encoder_decoder=lowerCamelCase , **lowerCamelCase )
@property
def _snake_case ( self : List[str] ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
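# Hedged worked example of the feature-count property above, with illustrative
# (not default) values: embedding_dimension=[2], num_dynamic_real_features=0,
# num_time_features=7, num_static_real_features=0, input_size=1 gives
#   2 + 0 + 7 + 0 + 1 * 2 = 11
# and with a 7-element lags_sequence the model input dimension
# (`feature_size`) becomes 1 * 7 + 11 = 18.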
| 655 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : Optional[int] = logging.get_logger(__name__)
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = MobileNetVaConfig(layer_norm_eps=0.0_0_1 )
if "_quant" in model_name:
raise ValueError("Quantized models are not supported." )
__lowercase = re.match(R"^mobilenet_v1_([^_]*)_([^_]*)$" , _SCREAMING_SNAKE_CASE )
if matches:
__lowercase = float(matches[1] )
__lowercase = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__lowercase = 1_0_0_1
__lowercase = "imagenet-1k-id2label.json"
__lowercase = "huggingface/label-files"
__lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
__lowercase = {int(_SCREAMING_SNAKE_CASE ) + 1: v for k, v in idalabel.items()}
__lowercase = "background"
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
return config
def snake_case_ ( ):
__lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowercase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
__lowercase = get_mobilenet_va_config(_SCREAMING_SNAKE_CASE )
# Load 🤗 model
__lowercase = MobileNetVaForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__lowercase = MobileNetVaImageProcessor(
crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 3_2} , )
__lowercase = image_processor(images=prepare_img() , return_tensors="pt" )
__lowercase = model(**_SCREAMING_SNAKE_CASE )
__lowercase = outputs.logits
assert logits.shape == (1, 1_0_0_1)
if model_name == "mobilenet_v1_1.0_224":
__lowercase = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] )
elif model_name == "mobilenet_v1_0.75_192":
__lowercase = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] )
else:
__lowercase = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
print("Pushing to the hub..." )
__lowercase = "google/" + model_name
image_processor.push_to_hub(_SCREAMING_SNAKE_CASE )
model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
snake_case__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
snake_case__ : Dict = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
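    # Hedged CLI example (script filename and local paths are placeholders):
    #
    #   python convert_mobilenet_v1_checkpoint.py \
    #       --model_name mobilenet_v1_1.0_224 \
    #       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
    #       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_pt \
    #       --push_to_hub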
| 655 | 1 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
snake_case__ : List[Any] = logging.getLogger(__name__)
class _A :
'''simple docstring'''
def __init__( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = False
def _snake_case ( self : List[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int] ):
'''simple docstring'''
if not self.initialized:
__lowercase = RagRetriever(
lowerCamelCase , question_encoder_tokenizer=lowerCamelCase , generator_tokenizer=lowerCamelCase , index=lowerCamelCase , init_retrieval=lowerCamelCase , )
__lowercase = True
def _snake_case ( self : Dict ):
'''simple docstring'''
self.retriever.index.init_index()
def _snake_case ( self : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : Any ):
'''simple docstring'''
__lowercase , __lowercase = self.retriever._main_retrieve(lowerCamelCase , lowerCamelCase )
return doc_ids, retrieved_doc_embeds
class _A ( _lowercase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCamelCase : Dict , lowerCamelCase : Tuple , lowerCamelCase : List[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple=None ):
'''simple docstring'''
if index is not None and index.is_initialized() and len(lowerCamelCase ) > 0:
raise ValueError(
"When using Ray for distributed fine-tuning, "
"you'll need to provide the paths instead, "
"as the dataset and the index are loaded "
"separately. More info in examples/rag/use_own_knowledge_dataset.py " )
super().__init__(
lowerCamelCase , question_encoder_tokenizer=lowerCamelCase , generator_tokenizer=lowerCamelCase , index=lowerCamelCase , init_retrieval=lowerCamelCase , )
__lowercase = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
for worker in self.retrieval_workers
] )
def _snake_case ( self : Tuple ):
'''simple docstring'''
logger.info("initializing retrieval" )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def _snake_case ( self : Dict , lowerCamelCase : str , lowerCamelCase : Tuple ):
'''simple docstring'''
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
__lowercase = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
__lowercase , __lowercase = ray.get(random_worker.retrieve.remote(lowerCamelCase , lowerCamelCase ) )
else:
__lowercase , __lowercase = self._main_retrieve(lowerCamelCase , lowerCamelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowerCamelCase )
@classmethod
def _snake_case ( cls : Optional[Any] , lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any]=None , **lowerCamelCase : int ):
'''simple docstring'''
return super(lowerCamelCase , cls ).get_tokenizers(lowerCamelCase , lowerCamelCase , **lowerCamelCase )
@classmethod
def _snake_case ( cls : Optional[int] , lowerCamelCase : str , lowerCamelCase : List[str] , lowerCamelCase : str=None , **lowerCamelCase : Tuple ):
'''simple docstring'''
__lowercase = kwargs.pop("config" , lowerCamelCase ) or RagConfig.from_pretrained(lowerCamelCase , **lowerCamelCase )
__lowercase = RagTokenizer.from_pretrained(lowerCamelCase , config=lowerCamelCase )
__lowercase = rag_tokenizer.question_encoder
__lowercase = rag_tokenizer.generator
if indexed_dataset is not None:
__lowercase = "custom"
__lowercase = CustomHFIndex(config.retrieval_vector_size , lowerCamelCase )
else:
__lowercase = cls._build_index(lowerCamelCase )
return cls(
lowerCamelCase , question_encoder_tokenizer=lowerCamelCase , generator_tokenizer=lowerCamelCase , retrieval_workers=lowerCamelCase , index=lowerCamelCase , )
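# --- Hedged usage sketch --------------------------------------------------------
# Spinning up remote retrieval actors with Ray and building the distributed
# retriever; class names are obfuscated above, so the upstream-style names
# (`RayRetriever` worker, `RagRayDistributedRetriever`) are assumptions:
#
#   import ray
#   ray.init()
#   workers = [ray.remote(RayRetriever).remote() for _ in range(4)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       "facebook/rag-token-nq", workers, indexed_dataset=my_dataset
#   )
#   retriever.init_retrieval()  # loads the index inside each worker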
| 655 |
from __future__ import annotations
from typing import Any
class _A :
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCamelCase : int ):
'''simple docstring'''
__lowercase = num_of_nodes
__lowercase = []
__lowercase = {}
def _snake_case ( self : Dict , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int ):
'''simple docstring'''
self.m_edges.append([u_node, v_node, weight] )
def _snake_case ( self : List[Any] , lowerCamelCase : int ):
'''simple docstring'''
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : int ):
'''simple docstring'''
if self.m_component[u_node] != u_node:
for k in self.m_component:
__lowercase = self.find_component(lowerCamelCase )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : list[int] , lowerCamelCase : int , lowerCamelCase : int ):
'''simple docstring'''
if component_size[u_node] <= component_size[v_node]:
__lowercase = v_node
component_size[v_node] += component_size[u_node]
self.set_component(lowerCamelCase )
elif component_size[u_node] >= component_size[v_node]:
__lowercase = self.find_component(lowerCamelCase )
component_size[u_node] += component_size[v_node]
self.set_component(lowerCamelCase )
def _snake_case ( self : Any ):
'''simple docstring'''
__lowercase = []
__lowercase = 0
__lowercase = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
__lowercase = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
__lowercase , __lowercase , __lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
__lowercase = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase , __lowercase , __lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(lowerCamelCase , lowerCamelCase , lowerCamelCase )
print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
__lowercase = [-1] * self.m_num_of_nodes
print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def snake_case_ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
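    # Hedged usage example (method names are obfuscated above; upstream they are
    # `add_edge` and `boruvka` on a `Graph(num_of_nodes)`):
    #
    #   g = Graph(3)
    #   g.add_edge(0, 1, 5); g.add_edge(1, 2, 10); g.add_edge(0, 2, 1)
    #   g.boruvka()  # picks edges (0, 2) and (0, 1): total MST weight 6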
| 655 | 1 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
snake_case__ : List[Any] = Mapping[str, np.ndarray]
snake_case__ : Any = Mapping[str, Any] # Is a nested dict.
snake_case__ : Any = 0.0_1
@dataclasses.dataclass(frozen=_lowercase )
class _A :
'''simple docstring'''
_snake_case : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
_snake_case : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
_snake_case : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
_snake_case : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
_snake_case : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
_snake_case : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
_snake_case : Optional[str] = None
# Templates used to generate this protein (prediction-only)
_snake_case : Optional[Sequence[str]] = None
# Chain corresponding to each parent
_snake_case : Optional[Sequence[int]] = None
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = R"(\[[A-Z]+\]\n)"
__lowercase = [tag.strip() for tag in re.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0]
__lowercase = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
__lowercase = ["N", "CA", "C"]
__lowercase = None
__lowercase = None
__lowercase = None
for g in groups:
if "[PRIMARY]" == g[0]:
__lowercase = g[1][0].strip()
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
if seq[i] not in residue_constants.restypes:
__lowercase = "X" # FIXME: strings are immutable
__lowercase = np.array(
[residue_constants.restype_order.get(_SCREAMING_SNAKE_CASE , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
__lowercase = []
for axis in range(3 ):
tertiary.append(list(map(_SCREAMING_SNAKE_CASE , g[1][axis].split() ) ) )
__lowercase = np.array(_SCREAMING_SNAKE_CASE )
__lowercase = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(_SCREAMING_SNAKE_CASE ):
__lowercase = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
__lowercase = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
__lowercase = np.zeros(
(
len(_SCREAMING_SNAKE_CASE ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(_SCREAMING_SNAKE_CASE ):
__lowercase = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=_SCREAMING_SNAKE_CASE , atom_mask=_SCREAMING_SNAKE_CASE , aatype=_SCREAMING_SNAKE_CASE , residue_index=np.arange(len(_SCREAMING_SNAKE_CASE ) ) , b_factors=_SCREAMING_SNAKE_CASE , )
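# ---------------------------------------------------------------------------
# Hedged illustration (toy input, not from the original file): the parser
# above consumes ProteinNet-style text such as
#
#   [PRIMARY]
#   GAV
#   [TERTIARY]
#   x1 x2 ... x9   <- one line per axis (x, y, z); 3 backbone atoms (N, CA, C)
#   y1 y2 ... y9      per residue, coordinates in picometers
#   z1 z2 ... z9
#   [MASK]
#   ++-
#
# [PRIMARY] is the residue sequence (unknown letters become "X"), [TERTIARY]
# is rescaled by PICO_TO_ANGSTROM above (1 angstrom = 100 picometers), and
# [MASK] marks resolved ("+") versus missing ("-") residues.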
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 ):
__lowercase = []
__lowercase = prot.remark
if remark is not None:
pdb_headers.append(F"""REMARK {remark}""" )
__lowercase = prot.parents
__lowercase = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
__lowercase = [p for i, p in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if i == chain_id]
if parents is None or len(_SCREAMING_SNAKE_CASE ) == 0:
__lowercase = ["N/A"]
pdb_headers.append(F"""PARENT {' '.join(_SCREAMING_SNAKE_CASE )}""" )
return pdb_headers
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = []
__lowercase = pdb_str.split("\n" )
__lowercase = prot.remark
if remark is not None:
out_pdb_lines.append(F"""REMARK {remark}""" )
__lowercase = 42
if prot.parents is not None and len(prot.parents ) > 0:
__lowercase = []
if prot.parents_chain_index is not None:
__lowercase = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(_SCREAMING_SNAKE_CASE ) , [] )
parent_dict[str(_SCREAMING_SNAKE_CASE )].append(_SCREAMING_SNAKE_CASE )
__lowercase = max([int(_SCREAMING_SNAKE_CASE ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
__lowercase = parent_dict.get(str(_SCREAMING_SNAKE_CASE ) , ["N/A"] )
parents_per_chain.append(_SCREAMING_SNAKE_CASE )
else:
parents_per_chain.append(list(prot.parents ) )
else:
__lowercase = [["N/A"]]
def make_parent_line(_SCREAMING_SNAKE_CASE ) -> str:
return F"""PARENT {' '.join(_SCREAMING_SNAKE_CASE )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
__lowercase = 0
for i, l in enumerate(_SCREAMING_SNAKE_CASE ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(_SCREAMING_SNAKE_CASE )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(_SCREAMING_SNAKE_CASE ):
__lowercase = parents_per_chain[chain_counter]
else:
__lowercase = ["N/A"]
out_pdb_lines.append(make_parent_line(_SCREAMING_SNAKE_CASE ) )
return "\n".join(_SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = residue_constants.restypes + ["X"]
def res_atoa(_SCREAMING_SNAKE_CASE ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , "UNK" )
__lowercase = residue_constants.atom_types
__lowercase = []
__lowercase = prot.atom_mask
__lowercase = prot.aatype
__lowercase = prot.atom_positions
__lowercase = prot.residue_index.astype(np.intaa )
__lowercase = prot.b_factors
__lowercase = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("Invalid aatypes." )
__lowercase = get_pdb_headers(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
pdb_lines.extend(_SCREAMING_SNAKE_CASE )
__lowercase = aatype.shape[0]
__lowercase = 1
__lowercase = 0
__lowercase = string.ascii_uppercase
__lowercase = None
# Add all atom sites.
for i in range(_SCREAMING_SNAKE_CASE ):
__lowercase = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(_SCREAMING_SNAKE_CASE , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
__lowercase = "ATOM"
__lowercase = atom_name if len(_SCREAMING_SNAKE_CASE ) == 4 else F""" {atom_name}"""
__lowercase = ""
__lowercase = ""
__lowercase = 1.0_0
__lowercase = atom_name[0] # Protein supports only C, N, O, S, this works.
__lowercase = ""
__lowercase = "A"
if chain_index is not None:
__lowercase = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
__lowercase = (
F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
F"""{res_name_a:>3} {chain_tag:>1}"""
F"""{residue_index[i]:>4}{insertion_code:>1} """
F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
F"""{occupancy:>6.2f}{b_factor:>6.2f} """
F"""{element:>2}{charge:>2}"""
)
pdb_lines.append(_SCREAMING_SNAKE_CASE )
atom_index += 1
__lowercase = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
__lowercase = True
__lowercase = chain_index[i + 1]
if should_terminate:
# Close the chain.
__lowercase = "TER"
__lowercase = (
F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(_SCREAMING_SNAKE_CASE )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
pdb_lines.append("END" )
pdb_lines.append("" )
return "\n".join(_SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , ):
return Protein(
aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=_SCREAMING_SNAKE_CASE , remark=_SCREAMING_SNAKE_CASE , parents=_SCREAMING_SNAKE_CASE , parents_chain_index=_SCREAMING_SNAKE_CASE , )
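# ---------------------------------------------------------------------------
# Hedged usage sketch (commented out because the dataclass is declared as `_A`
# in this dump even though the code above refers to it as `Protein`): building
# a minimal one-residue Protein by hand, following the field comments above.
#
#   n_res, n_atoms = 1, residue_constants.atom_type_num
#   toy = Protein(
#       atom_positions=np.zeros((n_res, n_atoms, 3)),
#       aatype=np.zeros((n_res,), dtype=np.int32),
#       atom_mask=np.ones((n_res, n_atoms)),
#       residue_index=np.arange(1, n_res + 1),
#       b_factors=np.zeros((n_res, n_atoms)),
#   )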
| 655 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ : List[str] = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    snake_case__["modeling_mgp_str"] = [
        """MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """MgpstrModel""",
        """MgpstrPreTrainedModel""",
        """MgpstrForSceneTextRecognition""",
    ]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], snake_case__, module_spec=__spec__)
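# ---------------------------------------------------------------------------
# Hedged sketch (`_MiniLazyModule` is a hypothetical stand-in, not the real
# transformers._LazyModule): the lazy-init pattern above swaps the module
# object in sys.modules for a proxy that defers each submodule import until
# one of its public names is first accessed.
import importlib
import types


class _MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every public name to the submodule that defines it
        self._name_to_submodule = {
            public_name: submodule
            for submodule, names in import_structure.items()
            for public_name in names
        }

    def __getattr__(self, attr):
        # the submodule is imported only on first access, then the name is
        # resolved from the real module
        module = importlib.import_module("." + self._name_to_submodule[attr], self.__name__)
        return getattr(module, attr)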
| 655 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
snake_case__ : Dict = logging.get_logger(__name__)
class _A ( _lowercase ):
'''simple docstring'''
def __init__( self : List[Any] , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : float , **lowerCamelCase : Any ):
'''simple docstring'''
__lowercase = feature_size
__lowercase = sampling_rate
__lowercase = padding_value
__lowercase = kwargs.pop("padding_side" , "right" )
__lowercase = kwargs.pop("return_attention_mask" , lowerCamelCase )
super().__init__(**lowerCamelCase )
def _snake_case ( self : Any , lowerCamelCase : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , lowerCamelCase : Union[bool, str, PaddingStrategy] = True , lowerCamelCase : Optional[int] = None , lowerCamelCase : bool = False , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , lowerCamelCase : Optional[Union[str, TensorType]] = None , ):
'''simple docstring'''
if isinstance(lowerCamelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__lowercase = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
f""" to this method that includes {self.model_input_names[0]}, but you provided"""
f""" {list(processed_features.keys() )}""" )
__lowercase = processed_features[self.model_input_names[0]]
__lowercase = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowerCamelCase ) == 0:
if return_attention_mask:
__lowercase = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__lowercase = required_input[0]
if isinstance(lowerCamelCase , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases, so we grab the first non-empty element.
__lowercase = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowerCamelCase ):
__lowercase = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowerCamelCase ):
__lowercase = "tf"
elif is_torch_tensor(lowerCamelCase ):
__lowercase = "pt"
elif isinstance(lowerCamelCase , (int, float, list, tuple, np.ndarray) ):
__lowercase = "np"
else:
raise ValueError(
f"""type of {first_element} unknown: {type(lowerCamelCase )}. """
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__lowercase = to_numpy(lowerCamelCase )
else:
__lowercase = [to_numpy(lowerCamelCase ) for v in value]
# Convert padding_strategy in PaddingStrategy
__lowercase = self._get_padding_strategies(padding=lowerCamelCase , max_length=lowerCamelCase )
__lowercase = processed_features[self.model_input_names[0]]
__lowercase = len(lowerCamelCase )
if not all(len(lowerCamelCase ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
__lowercase = []
for i in range(lowerCamelCase ):
__lowercase = {k: v[i] for k, v in processed_features.items()}
# truncation
__lowercase = self._truncate(
lowerCamelCase , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , truncation=lowerCamelCase , )
truncated_inputs.append(lowerCamelCase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__lowercase = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__lowercase = PaddingStrategy.MAX_LENGTH
__lowercase = {}
for i in range(lowerCamelCase ):
# padding
__lowercase = self._pad(
truncated_inputs[i] , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , )
for key, value in outputs.items():
if key not in batch_outputs:
__lowercase = []
if value.dtype is np.dtype(np.floataa ):
__lowercase = value.astype(np.floataa )
batch_outputs[key].append(lowerCamelCase )
return BatchFeature(lowerCamelCase , tensor_type=lowerCamelCase )
def _snake_case ( self : Optional[int] , lowerCamelCase : Union[Dict[str, np.ndarray], BatchFeature] , lowerCamelCase : Optional[int] = None , lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , ):
'''simple docstring'''
__lowercase = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__lowercase = len(lowerCamelCase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__lowercase = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__lowercase = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowerCamelCase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__lowercase = np.ones(len(lowerCamelCase ) , dtype=np.intaa )
if needs_to_be_padded:
__lowercase = max_length - len(lowerCamelCase )
if self.padding_side == "right":
if return_attention_mask:
__lowercase = np.pad(
processed_features["attention_mask"] , (0, difference) )
__lowercase = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__lowercase = np.pad(
lowerCamelCase , lowerCamelCase , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__lowercase = np.pad(
processed_features["attention_mask"] , (difference, 0) )
__lowercase = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__lowercase = np.pad(
lowerCamelCase , lowerCamelCase , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def _snake_case ( self : int , lowerCamelCase : Union[Dict[str, np.ndarray], BatchFeature] , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , ):
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
__lowercase = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__lowercase = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__lowercase = len(lowerCamelCase ) > max_length
if needs_to_be_truncated:
__lowercase = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__lowercase = processed_features["attention_mask"][:max_length]
return processed_features
def _snake_case ( self : List[str] , lowerCamelCase : Tuple=False , lowerCamelCase : str=None ):
'''simple docstring'''
if padding is not False:
if padding is True:
__lowercase = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase = PaddingStrategy(lowerCamelCase )
elif isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase = padding
else:
__lowercase = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
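# ---------------------------------------------------------------------------
# Hedged sketch (plain numpy, separate from the class above; the function name
# is illustrative): right-padding a batch of variable-length 1-D feature
# sequences to the longest length while building the matching attention mask,
# mirroring the `padding_side == "right"` branch of `_pad`. `np` is already
# imported at the top of this file.
def pad_batch_sketch(batch, padding_value=0.0):
    max_length = max(len(seq) for seq in batch)
    padded, masks = [], []
    for seq in batch:
        difference = max_length - len(seq)
        padded.append(
            np.pad(np.asarray(seq, dtype=np.float32), (0, difference), "constant", constant_values=padding_value)
        )
        masks.append(np.pad(np.ones(len(seq), dtype=np.int32), (0, difference)))
    return np.stack(padded), np.stack(masks)
# e.g. pad_batch_sketch([[1.0, 2.0], [3.0]])[1].tolist() == [[1, 1], [1, 0]]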
| 655 |
from __future__ import annotations
import bisect
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ):
if hi < 0:
__lowercase = len(_SCREAMING_SNAKE_CASE )
while lo < hi:
__lowercase = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__lowercase = mid + 1
else:
__lowercase = mid
return lo
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ):
if hi < 0:
__lowercase = len(_SCREAMING_SNAKE_CASE )
while lo < hi:
__lowercase = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__lowercase = mid + 1
else:
__lowercase = mid
return lo
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ):
sorted_collection.insert(bisect_left(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ):
sorted_collection.insert(bisect_right(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = 0
__lowercase = len(_SCREAMING_SNAKE_CASE ) - 1
while left <= right:
__lowercase = left + (right - left) // 2
__lowercase = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__lowercase = midpoint - 1
else:
__lowercase = midpoint + 1
return None
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = bisect.bisect_left(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if index != len(_SCREAMING_SNAKE_CASE ) and sorted_collection[index] == item:
return index
return None
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if right < left:
return None
__lowercase = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , midpoint - 1 )
else:
return binary_search_by_recursion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , midpoint + 1 , _SCREAMING_SNAKE_CASE )
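# ---------------------------------------------------------------------------
# Hedged sketch (toy data; `_demo_bisect` is illustrative, not from the
# original file): every variant above shrinks [lo, hi) around the same
# midpoint probe. `lo + (hi - lo) // 2` is used instead of `(lo + hi) // 2`,
# which can overflow in fixed-width-integer languages (harmless in Python,
# kept here for portability).
def _demo_bisect():
    data = [0, 5, 7, 7, 10, 15]
    assert bisect.bisect_left(data, 7) == 2   # first index where 7 fits
    assert bisect.bisect_right(data, 7) == 4  # index just past the last 7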
if __name__ == "__main__":
snake_case__ : Optional[Any] = input("""Enter numbers separated by comma:\n""").strip()
snake_case__ : Any = sorted(int(item) for item in user_input.split(""","""))
snake_case__ : Any = int(input("""Enter a single number to be found in the list:\n"""))
snake_case__ : List[Any] = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
| 655 | 1 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
snake_case__ : int = logging.get_logger(__name__)
snake_case__ : Optional[int] = {
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Dict = """conditional_detr"""
_snake_case : Union[str, Any] = ["""past_key_values"""]
_snake_case : Optional[int] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Optional[Any] , lowerCamelCase : int=True , lowerCamelCase : Tuple=None , lowerCamelCase : Optional[int]=3 , lowerCamelCase : Optional[int]=300 , lowerCamelCase : List[Any]=6 , lowerCamelCase : str=2_048 , lowerCamelCase : Any=8 , lowerCamelCase : List[str]=6 , lowerCamelCase : Any=2_048 , lowerCamelCase : List[Any]=8 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : List[str]=0.0 , lowerCamelCase : List[Any]=True , lowerCamelCase : str="relu" , lowerCamelCase : int=256 , lowerCamelCase : Dict=0.1 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : Dict=0.0 , lowerCamelCase : Tuple=0.02 , lowerCamelCase : int=1.0 , lowerCamelCase : Tuple=False , lowerCamelCase : List[str]="sine" , lowerCamelCase : List[Any]="resnet50" , lowerCamelCase : Any=True , lowerCamelCase : Any=False , lowerCamelCase : List[Any]=2 , lowerCamelCase : List[Any]=5 , lowerCamelCase : str=2 , lowerCamelCase : Dict=1 , lowerCamelCase : List[str]=1 , lowerCamelCase : Union[str, Any]=2 , lowerCamelCase : Dict=5 , lowerCamelCase : List[Any]=2 , lowerCamelCase : Tuple=0.25 , **lowerCamelCase : List[str] , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__lowercase = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase = backbone_config.get("model_type" )
__lowercase = CONFIG_MAPPING[backbone_model_type]
__lowercase = config_class.from_dict(lowerCamelCase )
__lowercase = use_timm_backbone
__lowercase = backbone_config
__lowercase = num_channels
__lowercase = num_queries
__lowercase = d_model
__lowercase = encoder_ffn_dim
__lowercase = encoder_layers
__lowercase = encoder_attention_heads
__lowercase = decoder_ffn_dim
__lowercase = decoder_layers
__lowercase = decoder_attention_heads
__lowercase = dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = activation_function
__lowercase = init_std
__lowercase = init_xavier_std
__lowercase = encoder_layerdrop
__lowercase = decoder_layerdrop
__lowercase = encoder_layers
__lowercase = auxiliary_loss
__lowercase = position_embedding_type
__lowercase = backbone
__lowercase = use_pretrained_backbone
__lowercase = dilation
# Hungarian matcher
__lowercase = class_cost
__lowercase = bbox_cost
__lowercase = giou_cost
# Loss coefficients
__lowercase = mask_loss_coefficient
__lowercase = dice_loss_coefficient
__lowercase = cls_loss_coefficient
__lowercase = bbox_loss_coefficient
__lowercase = giou_loss_coefficient
__lowercase = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase , **lowerCamelCase )
@property
def _snake_case ( self : Tuple ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def _snake_case ( self : str ):
'''simple docstring'''
return self.d_model
def _snake_case ( self : int ):
'''simple docstring'''
__lowercase = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
__lowercase = self.backbone_config.to_dict()
__lowercase = self.__class__.model_type
return output
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Any = version.parse("""1.11""" )
@property
def _snake_case ( self : Tuple ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _snake_case ( self : Any ):
'''simple docstring'''
return 1e-5
@property
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
return 12
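# ---------------------------------------------------------------------------
# Hedged usage sketch: in the transformers package these two classes are
# ConditionalDetrConfig and its ONNX companion (both are renamed `_A` in this
# dump). The attribute_map above aliases generic names onto DETR-specific
# config fields:
#
#   from transformers import ConditionalDetrConfig
#   config = ConditionalDetrConfig()
#   config.hidden_size           # resolves to config.d_model (256 by default)
#   config.num_attention_heads   # resolves to config.encoder_attention_heads (8)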
| 655 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
snake_case__ : int = logging.get_logger(__name__)
snake_case__ : Optional[int] = {
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Dict = """conditional_detr"""
_snake_case : Union[str, Any] = ["""past_key_values"""]
_snake_case : Optional[int] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Optional[Any] , lowerCamelCase : int=True , lowerCamelCase : Tuple=None , lowerCamelCase : Optional[int]=3 , lowerCamelCase : Optional[int]=300 , lowerCamelCase : List[Any]=6 , lowerCamelCase : str=2_048 , lowerCamelCase : Any=8 , lowerCamelCase : List[str]=6 , lowerCamelCase : Any=2_048 , lowerCamelCase : List[Any]=8 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : List[str]=0.0 , lowerCamelCase : List[Any]=True , lowerCamelCase : str="relu" , lowerCamelCase : int=256 , lowerCamelCase : Dict=0.1 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : Dict=0.0 , lowerCamelCase : Tuple=0.02 , lowerCamelCase : int=1.0 , lowerCamelCase : Tuple=False , lowerCamelCase : List[str]="sine" , lowerCamelCase : List[Any]="resnet50" , lowerCamelCase : Any=True , lowerCamelCase : Any=False , lowerCamelCase : List[Any]=2 , lowerCamelCase : List[Any]=5 , lowerCamelCase : str=2 , lowerCamelCase : Dict=1 , lowerCamelCase : List[str]=1 , lowerCamelCase : Union[str, Any]=2 , lowerCamelCase : Dict=5 , lowerCamelCase : List[Any]=2 , lowerCamelCase : Tuple=0.25 , **lowerCamelCase : List[str] , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__lowercase = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase = backbone_config.get("model_type" )
__lowercase = CONFIG_MAPPING[backbone_model_type]
__lowercase = config_class.from_dict(lowerCamelCase )
__lowercase = use_timm_backbone
__lowercase = backbone_config
__lowercase = num_channels
__lowercase = num_queries
__lowercase = d_model
__lowercase = encoder_ffn_dim
__lowercase = encoder_layers
__lowercase = encoder_attention_heads
__lowercase = decoder_ffn_dim
__lowercase = decoder_layers
__lowercase = decoder_attention_heads
__lowercase = dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = activation_function
__lowercase = init_std
__lowercase = init_xavier_std
__lowercase = encoder_layerdrop
__lowercase = decoder_layerdrop
__lowercase = encoder_layers
__lowercase = auxiliary_loss
__lowercase = position_embedding_type
__lowercase = backbone
__lowercase = use_pretrained_backbone
__lowercase = dilation
# Hungarian matcher
__lowercase = class_cost
__lowercase = bbox_cost
__lowercase = giou_cost
# Loss coefficients
__lowercase = mask_loss_coefficient
__lowercase = dice_loss_coefficient
__lowercase = cls_loss_coefficient
__lowercase = bbox_loss_coefficient
__lowercase = giou_loss_coefficient
__lowercase = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase , **lowerCamelCase )
@property
def _snake_case ( self : Tuple ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def _snake_case ( self : str ):
'''simple docstring'''
return self.d_model
def _snake_case ( self : int ):
'''simple docstring'''
__lowercase = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
__lowercase = self.backbone_config.to_dict()
__lowercase = self.__class__.model_type
return output
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Any = version.parse("""1.11""" )
@property
def _snake_case ( self : Tuple ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _snake_case ( self : Any ):
'''simple docstring'''
return 1e-5
@property
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
return 12
| 655 | 1 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
snake_case__ : Union[str, Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
snake_case__ : List[str] = [0, 25, 50]
snake_case__ : Optional[int] = [25, 50, 75]
snake_case__ : int = fuzz.membership.trimf(X, abca)
snake_case__ : Dict = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
snake_case__ : List[str] = np.ones(75)
snake_case__ : Any = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
snake_case__ : Tuple = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
snake_case__ : Dict = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
snake_case__ : Any = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
snake_case__ : Optional[int] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
snake_case__ : int = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
snake_case__ : List[Any] = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
snake_case__ : Optional[Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
snake_case__ : Any = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
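# ---------------------------------------------------------------------------
# Hedged sketch (function names are illustrative): the "max-min composition"
# and "max-product composition" placeholders in the comments above,
# implemented with plain numpy for fuzzy relations R (m x n) and S (n x p);
# both return the composed (m x p) relation.
def max_min_composition(r, s):
    return np.max(np.minimum(r[:, :, None], s[None, :, :]), axis=1)

def max_product_composition(r, s):
    return np.max(r[:, :, None] * s[None, :, :], axis=1)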
| 655 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case__ : Any = logging.get_logger(__name__)
class _A ( _lowercase , _lowercase ):
'''simple docstring'''
_snake_case : Dict = """maskformer-swin"""
_snake_case : List[str] = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : List[str] , lowerCamelCase : Any=224 , lowerCamelCase : Optional[Any]=4 , lowerCamelCase : Dict=3 , lowerCamelCase : Tuple=96 , lowerCamelCase : str=[2, 2, 6, 2] , lowerCamelCase : Dict=[3, 6, 12, 24] , lowerCamelCase : Optional[Any]=7 , lowerCamelCase : Any=4.0 , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : List[str]=0.0 , lowerCamelCase : Optional[int]=0.0 , lowerCamelCase : List[str]=0.1 , lowerCamelCase : int="gelu" , lowerCamelCase : Optional[int]=False , lowerCamelCase : List[Any]=0.02 , lowerCamelCase : Tuple=1e-5 , lowerCamelCase : Dict=None , lowerCamelCase : Dict=None , **lowerCamelCase : int , ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = embed_dim
__lowercase = depths
__lowercase = len(lowerCamelCase )
__lowercase = num_heads
__lowercase = window_size
__lowercase = mlp_ratio
__lowercase = qkv_bias
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = drop_path_rate
__lowercase = hidden_act
__lowercase = use_absolute_embeddings
__lowercase = layer_norm_eps
__lowercase = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__lowercase = int(embed_dim * 2 ** (len(lowerCamelCase ) - 1) )
__lowercase = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase ) + 1 )]
__lowercase , __lowercase = get_aligned_output_features_output_indices(
out_features=lowerCamelCase , out_indices=lowerCamelCase , stage_names=self.stage_names )
| 655 | 1 |
from __future__ import annotations
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
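    """
    Return the first `nth_term` terms of the P-series 1 + 1/2^p + 1/3^p + ... as strings.

    Hedged doctest examples (the function is exposed as `snake_case_` in this dump):
    >>> snake_case_(5, 2)
    ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']
    >>> snake_case_("", 1000)
    ['']
    """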
if nth_term == "":
return [""]
__lowercase = int(_SCREAMING_SNAKE_CASE )
__lowercase = int(_SCREAMING_SNAKE_CASE )
__lowercase = []
for temp in range(int(_SCREAMING_SNAKE_CASE ) ):
series.append(F"""1 / {pow(temp + 1 , int(_SCREAMING_SNAKE_CASE ) )}""" if series else "1" )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case__ : List[Any] = int(input("""Enter the last number (nth term) of the P-Series"""))
snake_case__ : Tuple = int(input("""Enter the power for P-Series"""))
print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
print(p_series(nth_term, power))
| 655 |
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# bit count represents no. of bits in the gray code
if bit_count < 0:
raise ValueError("The given input must be positive" )
# get the generated string sequence
__lowercase = gray_code_sequence_string(_SCREAMING_SNAKE_CASE )
#
# convert them to integers
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
__lowercase = int(sequence[i] , 2 )
return sequence
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__lowercase = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__lowercase = gray_code_sequence_string(bit_count - 1 )
__lowercase = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__lowercase = "0" + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__lowercase = "1" + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
return sequence
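# ---------------------------------------------------------------------------
# Hedged walk-through (toy size, not from the original file): for bit_count=2
# the recursion takes the 1-bit sequence ["0", "1"], prefixes "0" over the
# first half and "1" over the reversed half, yielding ["00", "01", "11", "10"]
# -- i.e. the integers [0, 1, 3, 2], where adjacent codes differ in exactly
# one bit.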
if __name__ == "__main__":
import doctest
doctest.testmod()
| 655 | 1 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
snake_case__ : Any = """0.12""" # assumed parallelism: 8
@require_flax
@is_staging_test
class _A ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def _snake_case ( cls : List[str] ):
'''simple docstring'''
__lowercase = TOKEN
HfFolder.save_token(lowerCamelCase )
@classmethod
def _snake_case ( cls : Tuple ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-model-flax" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-model-flax-org" )
except HTTPError:
pass
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
__lowercase = FlaxBertModel(lowerCamelCase )
model.push_to_hub("test-model-flax" , use_auth_token=self._token )
__lowercase = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" )
__lowercase = flatten_dict(unfreeze(model.params ) )
__lowercase = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__lowercase = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase , 1e-3 , msg=f"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id="test-model-flax" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase , repo_id="test-model-flax" , push_to_hub=lowerCamelCase , use_auth_token=self._token )
__lowercase = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" )
__lowercase = flatten_dict(unfreeze(model.params ) )
__lowercase = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__lowercase = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase , 1e-3 , msg=f"""{key} not identical""" )
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
__lowercase = FlaxBertModel(lowerCamelCase )
model.push_to_hub("valid_org/test-model-flax-org" , use_auth_token=self._token )
__lowercase = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )
__lowercase = flatten_dict(unfreeze(model.params ) )
__lowercase = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__lowercase = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase , 1e-3 , msg=f"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-model-flax-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
lowerCamelCase , repo_id="valid_org/test-model-flax-org" , push_to_hub=lowerCamelCase , use_auth_token=self._token )
__lowercase = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )
__lowercase = flatten_dict(unfreeze(model.params ) )
__lowercase = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__lowercase = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase , 1e-3 , msg=f"""{key} not identical""" )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = True
    __lowercase = flatten_dict(modela.params )
    __lowercase = flatten_dict(modelb.params )
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1e-4:
__lowercase = False
return models_are_equal
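# Hedged illustration of the threshold above: two parameter trees count as
# different as soon as any single tensor's summed absolute difference exceeds
# 1e-4, e.g. np.sum(np.abs(np.zeros(3) - np.full(3, 1e-3))) == 3e-3 > 1e-4.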
@require_flax
class _A ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowercase = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
__lowercase = FlaxBertModel(lowerCamelCase )
__lowercase = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCamelCase , lowerCamelCase ) )
with self.assertRaises(lowerCamelCase ):
__lowercase = FlaxBertModel.from_pretrained(lowerCamelCase )
__lowercase = FlaxBertModel.from_pretrained(lowerCamelCase , subfolder=lowerCamelCase )
self.assertTrue(check_models_equal(lowerCamelCase , lowerCamelCase ) )
def _snake_case ( self : Any ):
'''simple docstring'''
__lowercase = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
__lowercase = FlaxBertModel(lowerCamelCase )
__lowercase = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCamelCase , lowerCamelCase ) , max_shard_size="10KB" )
with self.assertRaises(lowerCamelCase ):
__lowercase = FlaxBertModel.from_pretrained(lowerCamelCase )
__lowercase = FlaxBertModel.from_pretrained(lowerCamelCase , subfolder=lowerCamelCase )
self.assertTrue(check_models_equal(lowerCamelCase , lowerCamelCase ) )
def _snake_case ( self : Dict ):
'''simple docstring'''
__lowercase = "bert"
__lowercase = "hf-internal-testing/tiny-random-bert-subfolder"
with self.assertRaises(lowerCamelCase ):
__lowercase = FlaxBertModel.from_pretrained(lowerCamelCase )
__lowercase = FlaxBertModel.from_pretrained(lowerCamelCase , subfolder=lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowercase = "bert"
__lowercase = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
with self.assertRaises(lowerCamelCase ):
__lowercase = FlaxBertModel.from_pretrained(lowerCamelCase )
__lowercase = FlaxBertModel.from_pretrained(lowerCamelCase , subfolder=lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
| 655 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ):
model.train()
__lowercase = model(_SCREAMING_SNAKE_CASE )
__lowercase = F.mse_loss(_SCREAMING_SNAKE_CASE , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(_SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
set_seed(4_2 )
__lowercase = RegressionModel()
__lowercase = deepcopy(_SCREAMING_SNAKE_CASE )
__lowercase = RegressionDataset(length=8_0 )
__lowercase = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=1_6 )
model.to(accelerator.device )
if sched:
__lowercase = AdamW(params=model.parameters() , lr=1E-3 )
__lowercase = AdamW(params=ddp_model.parameters() , lr=1E-3 )
__lowercase = LambdaLR(_SCREAMING_SNAKE_CASE , lr_lambda=lambda _SCREAMING_SNAKE_CASE : epoch**0.6_5 )
__lowercase = LambdaLR(_SCREAMING_SNAKE_CASE , lr_lambda=lambda _SCREAMING_SNAKE_CASE : epoch**0.6_5 )
# Make a copy of `model`
if sched:
__lowercase , __lowercase , __lowercase , __lowercase = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
__lowercase , __lowercase = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# Test when on a single CPU or GPU that the context manager does nothing
__lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE )
# Use a single batch
__lowercase , __lowercase = next(iter(_SCREAMING_SNAKE_CASE ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) )
__lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
# Sync grads
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )]
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# Test on distributed setup that context manager behaves properly
__lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE )
# Use a single batch
__lowercase , __lowercase = next(iter(_SCREAMING_SNAKE_CASE ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) )
__lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
# Sync grads
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )]
def snake_case_ ( _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ):
__lowercase = Accelerator(
split_batches=_SCREAMING_SNAKE_CASE , dispatch_batches=_SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE )
for iteration, batch in enumerate(_SCREAMING_SNAKE_CASE ):
__lowercase , __lowercase = batch.values()
# Gather the distributed inputs and targs for the base model
__lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) )
__lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(_SCREAMING_SNAKE_CASE ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )]
GradientState._reset_state()
def snake_case_ ( _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ):
__lowercase = Accelerator(
split_batches=_SCREAMING_SNAKE_CASE , dispatch_batches=_SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for iteration, batch in enumerate(_SCREAMING_SNAKE_CASE ):
__lowercase , __lowercase = batch.values()
# Gather the distributed inputs and targs for the base model
__lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) )
__lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_SCREAMING_SNAKE_CASE )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"""
__lowercase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_SCREAMING_SNAKE_CASE ))
if accelerator.num_processes > 1:
check_model_parameters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
GradientState._reset_state()
def snake_case_ ( ):
__lowercase = Accelerator()
__lowercase = RegressionDataset(length=8_0 )
__lowercase = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=1_6 )
__lowercase = RegressionDataset(length=9_6 )
__lowercase = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=1_6 )
__lowercase , __lowercase = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(_SCREAMING_SNAKE_CASE ):
assert id(accelerator.gradient_state.active_dataloader ) == id(_SCREAMING_SNAKE_CASE )
if iteration < len(_SCREAMING_SNAKE_CASE ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(_SCREAMING_SNAKE_CASE ):
assert id(accelerator.gradient_state.active_dataloader ) == id(_SCREAMING_SNAKE_CASE )
if batch_num < len(_SCREAMING_SNAKE_CASE ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def snake_case_ ( ):
__lowercase = Accelerator()
__lowercase = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(_SCREAMING_SNAKE_CASE )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(_SCREAMING_SNAKE_CASE )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation_with_opt_and_scheduler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
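# ---------------------------------------------------------------------------
# Hedged sketch (minimal pattern only, not a runnable test): the core loop the
# tests above exercise. Under `accelerator.accumulate(model)`, gradient sync
# and the effective optimizer step only take effect every
# `gradient_accumulation_steps` batches (or at the end of the dataloader):
#
#   accelerator = Accelerator(gradient_accumulation_steps=2)
#   model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#   for batch in dataloader:
#       with accelerator.accumulate(model):
#           loss = F.mse_loss(model(batch["x"]), batch["y"])
#           accelerator.backward(loss)
#           optimizer.step()
#           optimizer.zero_grad()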
| 655 | 1 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
snake_case__ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(_lowercase )
class _A ( _lowercase ):
'''simple docstring'''
def __init__( self : int , *lowerCamelCase : Any , **lowerCamelCase : Dict ):
'''simple docstring'''
super().__init__(*lowerCamelCase , **lowerCamelCase )
requires_backends(self , "vision" )
self.check_model_type(lowerCamelCase )
def __call__( self : Optional[Any] , lowerCamelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCamelCase : Any ):
'''simple docstring'''
return super().__call__(lowerCamelCase , **lowerCamelCase )
def _snake_case ( self : Union[str, Any] , **lowerCamelCase : Any ):
'''simple docstring'''
return {}, {}, {}
def _snake_case ( self : Dict , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowercase = load_image(lowerCamelCase )
__lowercase = image.size
__lowercase = self.image_processor(images=lowerCamelCase , return_tensors=self.framework )
return model_inputs
def _snake_case ( self : int , lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__lowercase = self.model(**lowerCamelCase )
return model_outputs
def _snake_case ( self : int , lowerCamelCase : Any ):
'''simple docstring'''
__lowercase = model_outputs.predicted_depth
__lowercase = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="bicubic" , align_corners=lowerCamelCase )
__lowercase = prediction.squeeze().cpu().numpy()
__lowercase = (output * 255 / np.max(lowerCamelCase )).astype("uint8" )
__lowercase = Image.fromarray(lowerCamelCase )
__lowercase = {}
__lowercase = predicted_depth
__lowercase = depth
return output_dict
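# ---------------------------------------------------------------------------
# Hedged usage sketch (the checkpoint name is an assumption, not prescribed by
# this file):
#
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   outputs = depth_estimator("path/to/image.jpg")
#   outputs["depth"]            # PIL.Image built in `postprocess` above
#   outputs["predicted_depth"]  # the raw torch tensor before rescaling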
| 655 |
from ....utils import logging
snake_case__ : List[Any] = logging.get_logger(__name__)
class _A ( _lowercase ):
'''simple docstring'''
def __init__( self : List[str] , lowerCamelCase : Any , lowerCamelCase : Dict=None , lowerCamelCase : Dict=2_048 ):
'''simple docstring'''
__lowercase = config.__dict__
__lowercase = modal_hidden_size
if num_labels:
__lowercase = num_labels
| 655 | 1 |
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if mass < 0:
raise ValueError("The mass of a body cannot be negative" )
return 0.5 * mass * abs(_SCREAMING_SNAKE_CASE ) * abs(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 655 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _A ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_snake_case : Dict = StableUnCLIPImgaImgPipeline
_snake_case : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
_snake_case : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_snake_case : int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_snake_case : int = frozenset([] )
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowercase = 32
__lowercase = embedder_hidden_size
# image encoding components
__lowercase = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
__lowercase = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCamelCase , projection_dim=lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
__lowercase = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
__lowercase = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
__lowercase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__lowercase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase , layers_per_block=1 , upcast_attention=lowerCamelCase , use_linear_projection=lowerCamelCase , )
torch.manual_seed(0 )
__lowercase = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
__lowercase = AutoencoderKL()
__lowercase = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def _snake_case ( self : List[Any] , lowerCamelCase : str , lowerCamelCase : Any=0 , lowerCamelCase : Union[str, Any]=True ):
'''simple docstring'''
if str(lowerCamelCase ).startswith("mps" ):
__lowercase = torch.manual_seed(lowerCamelCase )
else:
__lowercase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
if pil_image:
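            # map the random tensor into [0, 1], move channels last and convert it
            # to a PIL image via the pipeline helper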
__lowercase = input_image * 0.5 + 0.5
__lowercase = input_image.clamp(0 , 1 )
__lowercase = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__lowercase = DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = "cpu" # ensure determinism for the device-dependent torch.Generator
__lowercase = self.get_dummy_components()
__lowercase = StableUnCLIPImgaImgPipeline(**lowerCamelCase )
__lowercase = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
__lowercase = self.get_dummy_inputs(lowerCamelCase )
inputs.update({"image_embeds": None} )
__lowercase = sd_pipe(**lowerCamelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowercase = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _snake_case ( self : Dict ):
'''simple docstring'''
__lowercase = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _snake_case ( self : str ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase )
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self : Any ):
'''simple docstring'''
__lowercase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__lowercase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
__lowercase = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowercase = torch.Generator(device="cpu" ).manual_seed(0 )
__lowercase = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" )
__lowercase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__lowercase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
__lowercase = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowercase = torch.Generator(device="cpu" ).manual_seed(0 )
__lowercase = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" )
__lowercase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowercase = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
__lowercase = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowercase = pipe(
lowerCamelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
__lowercase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 655 | 1 |
from __future__ import annotations
snake_case__ : Any = """#"""
class _A :
'''simple docstring'''
def __init__( self : str ):
'''simple docstring'''
__lowercase = {}
def _snake_case ( self : Union[str, Any] , lowerCamelCase : str ):
'''simple docstring'''
__lowercase = self._trie
for char in text:
if char not in trie:
__lowercase = {}
__lowercase = trie[char]
__lowercase = True
def _snake_case ( self : str , lowerCamelCase : str ):
'''simple docstring'''
__lowercase = self._trie
for char in prefix:
if char in trie:
__lowercase = trie[char]
else:
return []
return self._elements(lowerCamelCase )
def _snake_case ( self : Optional[Any] , lowerCamelCase : dict ):
'''simple docstring'''
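        # depth-first collection: the END marker terminates a word, otherwise the
        # current character is prefixed onto every suffix found below it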
__lowercase = []
for c, v in d.items():
__lowercase = [" "] if c == END else [(c + s) for s in self._elements(lowerCamelCase )]
result.extend(lowerCamelCase )
return tuple(lowerCamelCase )
snake_case__ : str = Trie()
snake_case__ : Any = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""")
for word in words:
trie.insert_word(word)
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = trie.find_word(_SCREAMING_SNAKE_CASE )
return tuple(string + word for word in suffixes )
def snake_case_ ( ):
print(autocomplete_using_trie("de" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 655 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _A ( _lowercase , _lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self : Optional[Any] , *,
lowerCamelCase : int = 4 , lowerCamelCase : int = 768 , lowerCamelCase : int , lowerCamelCase : Optional[int] , ):
'''simple docstring'''
super().__init__()
__lowercase = nn.Parameter(torch.zeros(lowerCamelCase ) )
# parameters for additional clip time embeddings
__lowercase = nn.Linear(lowerCamelCase , lowerCamelCase )
__lowercase = nn.Linear(lowerCamelCase , lowerCamelCase )
# parameters for encoder hidden states
__lowercase = clip_extra_context_tokens
__lowercase = nn.Linear(
lowerCamelCase , self.clip_extra_context_tokens * cross_attention_dim )
__lowercase = nn.Linear(lowerCamelCase , lowerCamelCase )
__lowercase = nn.LayerNorm(lowerCamelCase )
def _snake_case ( self : Union[str, Any] , *, lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple ):
'''simple docstring'''
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
__lowercase = image_embeddings.shape[0]
__lowercase = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
__lowercase = classifier_free_guidance_embeddings.expand(
lowerCamelCase , -1 )
__lowercase = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
__lowercase = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
__lowercase = self.embedding_proj(lowerCamelCase )
__lowercase = self.clip_image_embeddings_project_to_time_embeddings(lowerCamelCase )
__lowercase = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
__lowercase = self.clip_extra_context_tokens_proj(lowerCamelCase )
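        # lay the projection out as (batch, clip_extra_context_tokens, cross_attention_dim)
        # so the extra tokens can be concatenated to the text encoder sequence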
__lowercase = clip_extra_context_tokens.reshape(lowerCamelCase , -1 , self.clip_extra_context_tokens )
__lowercase = clip_extra_context_tokens.permute(0 , 2 , 1 )
__lowercase = self.encoder_hidden_states_proj(lowerCamelCase )
__lowercase = self.text_encoder_hidden_states_norm(lowerCamelCase )
__lowercase = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
| 655 | 1 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _A ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_snake_case : str = MobileBertTokenizer
_snake_case : Dict = MobileBertTokenizerFast
_snake_case : Optional[int] = True
_snake_case : str = True
_snake_case : Union[str, Any] = filter_non_english
_snake_case : Optional[int] = """google/mobilebert-uncased"""
def _snake_case ( self : Tuple ):
'''simple docstring'''
super().setUp()
__lowercase = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
__lowercase = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def _snake_case ( self : Union[str, Any] , lowerCamelCase : Any ):
'''simple docstring'''
__lowercase = "UNwant\u00E9d,running"
__lowercase = "unwanted, running"
return input_text, output_text
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(lowerCamelCase , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [9, 6, 7, 12, 10, 11] )
def _snake_case ( self : List[str] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer()
__lowercase = "UNwant\u00E9d,running"
__lowercase = tokenizer.tokenize(lowerCamelCase )
__lowercase = rust_tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
__lowercase = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
__lowercase = rust_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
__lowercase = self.get_rust_tokenizer()
__lowercase = tokenizer.encode(lowerCamelCase )
__lowercase = rust_tokenizer.encode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
# With lower casing
__lowercase = self.get_tokenizer(do_lower_case=lowerCamelCase )
__lowercase = self.get_rust_tokenizer(do_lower_case=lowerCamelCase )
__lowercase = "UNwant\u00E9d,running"
__lowercase = tokenizer.tokenize(lowerCamelCase )
__lowercase = rust_tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
__lowercase = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
__lowercase = rust_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
__lowercase = self.get_rust_tokenizer()
__lowercase = tokenizer.encode(lowerCamelCase )
__lowercase = rust_tokenizer.encode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def _snake_case ( self : int ):
'''simple docstring'''
__lowercase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowercase = BasicTokenizer(do_lower_case=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _snake_case ( self : Any ):
'''simple docstring'''
__lowercase = BasicTokenizer(do_lower_case=lowerCamelCase , strip_accents=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowercase = BasicTokenizer(do_lower_case=lowerCamelCase , strip_accents=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = BasicTokenizer(do_lower_case=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _snake_case ( self : int ):
'''simple docstring'''
__lowercase = BasicTokenizer(do_lower_case=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowercase = BasicTokenizer(do_lower_case=lowerCamelCase , strip_accents=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowercase = BasicTokenizer(do_lower_case=lowerCamelCase , strip_accents=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowercase = BasicTokenizer(do_lower_case=lowerCamelCase , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def _snake_case ( self : int ):
'''simple docstring'''
__lowercase = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
__lowercase = {}
for i, token in enumerate(lowerCamelCase ):
__lowercase = i
__lowercase = WordpieceTokenizer(vocab=lowerCamelCase , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def _snake_case ( self : List[str] ):
'''simple docstring'''
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def _snake_case ( self : Any ):
'''simple docstring'''
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowerCamelCase ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowerCamelCase ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
@slow
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" )
__lowercase = tokenizer.encode("sequence builders" , add_special_tokens=lowerCamelCase )
__lowercase = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCamelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(lowerCamelCase , lowerCamelCase )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def _snake_case ( self : Tuple ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowercase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
__lowercase = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
__lowercase = tokenizer_r.encode_plus(
lowerCamelCase , return_attention_mask=lowerCamelCase , return_token_type_ids=lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase , )
__lowercase = tokenizer_r.do_lower_case if hasattr(lowerCamelCase , "do_lower_case" ) else False
__lowercase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowercase = ["的", "人", "有"]
__lowercase = "".join(lowerCamelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowercase = True
__lowercase = self.tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
__lowercase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
__lowercase = tokenizer_p.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
__lowercase = tokenizer_r.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
__lowercase = tokenizer_r.convert_ids_to_tokens(lowerCamelCase )
__lowercase = tokenizer_p.convert_ids_to_tokens(lowerCamelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCamelCase , lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
__lowercase = False
__lowercase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
__lowercase = self.tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
__lowercase = tokenizer_r.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
__lowercase = tokenizer_p.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
__lowercase = tokenizer_r.convert_ids_to_tokens(lowerCamelCase )
__lowercase = tokenizer_p.convert_ids_to_tokens(lowerCamelCase )
# it is expected that only the first Chinese character is not preceded by "##".
__lowercase = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowerCamelCase )
]
self.assertListEqual(lowerCamelCase , lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
| 655 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
snake_case__ : Union[str, Any] = TypeVar("""T""")
snake_case__ : Optional[int] = TypeVar("""U""")
class _A ( Generic[T, U] ):
'''simple docstring'''
def __init__( self : Optional[int] , lowerCamelCase : T | None , lowerCamelCase : U | None ):
'''simple docstring'''
__lowercase = key
__lowercase = val
__lowercase = None
__lowercase = None
def __repr__( self : Any ):
'''simple docstring'''
return (
f"""Node: key: {self.key}, val: {self.val}, """
f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class _A ( Generic[T, U] ):
'''simple docstring'''
def __init__( self : Dict ):
'''simple docstring'''
__lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase )
__lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase )
__lowercase , __lowercase = self.rear, self.head
def __repr__( self : Optional[Any] ):
'''simple docstring'''
__lowercase = ["DoubleLinkedList"]
__lowercase = self.head
while node.next is not None:
rep.append(str(lowerCamelCase ) )
__lowercase = node.next
rep.append(str(self.rear ) )
return ",\n ".join(lowerCamelCase )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : DoubleLinkedListNode[T, U] ):
'''simple docstring'''
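        # splice the node in just before the rear sentinel, i.e. at the
        # most-recently-used end of the list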
__lowercase = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
__lowercase = node
__lowercase = previous
__lowercase = node
__lowercase = self.rear
def _snake_case ( self : Optional[int] , lowerCamelCase : DoubleLinkedListNode[T, U] ):
'''simple docstring'''
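        # the head/rear sentinels are never removed; otherwise unlink the node from
        # its neighbours and clear its own pointers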
if node.prev is None or node.next is None:
return None
__lowercase = node.next
__lowercase = node.prev
__lowercase = None
__lowercase = None
return node
class _A ( Generic[T, U] ):
'''simple docstring'''
_snake_case : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self : List[Any] , lowerCamelCase : int ):
'''simple docstring'''
__lowercase = DoubleLinkedList()
__lowercase = capacity
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = {}
def __repr__( self : Optional[Any] ):
'''simple docstring'''
return (
f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
f"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self : Dict , lowerCamelCase : T ):
'''simple docstring'''
return key in self.cache
def _snake_case ( self : List[Any] , lowerCamelCase : T ):
'''simple docstring'''
if key in self.cache:
self.hits += 1
__lowercase = self.cache[key]
__lowercase = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(lowerCamelCase )
return node.val
self.miss += 1
return None
def _snake_case ( self : Union[str, Any] , lowerCamelCase : T , lowerCamelCase : U ):
'''simple docstring'''
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
__lowercase = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(lowerCamelCase ) is not None
                ) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
__lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
__lowercase = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
__lowercase = value
self.list.add(lowerCamelCase )
@classmethod
def _snake_case ( cls : Union[str, Any] , lowerCamelCase : int = 128 ):
'''simple docstring'''
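        # one LRUCache instance is created lazily per decorated function and kept in
        # the class-level map; the first positional argument serves as the cache key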
def cache_decorator_inner(lowerCamelCase : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*lowerCamelCase : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
__lowercase = LRUCache(lowerCamelCase )
__lowercase = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
__lowercase = func(*lowerCamelCase )
cls.decorator_function_to_instance_map[func].put(args[0] , lowerCamelCase )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(lowerCamelCase , "cache_info" , lowerCamelCase ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
| 655 | 1 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
snake_case__ : int = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class _A ( unittest.TestCase , _lowercase ):
'''simple docstring'''
def _snake_case ( self : Any ):
'''simple docstring'''
__lowercase = load_tool("text-question-answering" )
self.tool.setup()
__lowercase = load_tool("text-question-answering" , remote=lowerCamelCase )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowercase = self.tool(lowerCamelCase , "What did Hugging Face do in April 2021?" )
self.assertEqual(lowerCamelCase , "launched the BigScience Research Workshop" )
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowercase = self.remote_tool(lowerCamelCase , "What did Hugging Face do in April 2021?" )
self.assertEqual(lowerCamelCase , "launched the BigScience Research Workshop" )
def _snake_case ( self : int ):
'''simple docstring'''
__lowercase = self.tool(text=lowerCamelCase , question="What did Hugging Face do in April 2021?" )
self.assertEqual(lowerCamelCase , "launched the BigScience Research Workshop" )
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowercase = self.remote_tool(text=lowerCamelCase , question="What did Hugging Face do in April 2021?" )
self.assertEqual(lowerCamelCase , "launched the BigScience Research Workshop" )
| 655 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
snake_case__ : Optional[Any] = logging.getLogger()
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = "\n".join(_SCREAMING_SNAKE_CASE )
Path(_SCREAMING_SNAKE_CASE ).open("w" ).writelines(_SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = """patrickvonplaten/t5-tiny-random"""
snake_case__ : int = """sshleifer/bart-tiny-random"""
snake_case__ : Union[str, Any] = """sshleifer/tiny-mbart"""
snake_case__ : List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class _A ( _lowercase ):
'''simple docstring'''
def _snake_case ( self : str , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowercase = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
__lowercase = input_file_name.parent / "utest_output.txt"
assert not output_file_name.exists()
__lowercase = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
_dump_articles(lowerCamelCase , lowerCamelCase )
__lowercase = str(Path(self.get_auto_remove_tmp_dir() ) / "scores.json" )
__lowercase = "translation_en_to_de" if model == T5_TINY else "summarization"
__lowercase = f"""
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
""".split()
with patch.object(lowerCamelCase , "argv" , lowerCamelCase ):
run_generate()
assert Path(lowerCamelCase ).exists()
# os.remove(Path(output_file_name))
def _snake_case ( self : Dict ):
'''simple docstring'''
self.run_eval_tester(lowerCamelCase )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def _snake_case ( self : Optional[Any] , lowerCamelCase : str ):
'''simple docstring'''
self.run_eval_tester(lowerCamelCase )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def _snake_case ( self : Optional[Any] , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowercase = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
__lowercase = input_file_name.parent / "utest_output.txt"
assert not output_file_name.exists()
__lowercase = {
"en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
"de": [
"Maschinelles Lernen ist großartig, oder?",
"Ich esse gerne Bananen",
"Morgen ist wieder ein toller Tag!",
],
}
__lowercase = Path(self.get_auto_remove_tmp_dir() )
__lowercase = str(tmp_dir / "scores.json" )
__lowercase = str(tmp_dir / "val.target" )
_dump_articles(lowerCamelCase , text["en"] )
_dump_articles(lowerCamelCase , text["de"] )
__lowercase = "translation_en_to_de" if model == T5_TINY else "summarization"
__lowercase = f"""
run_eval_search.py
{model}
{str(lowerCamelCase )}
{str(lowerCamelCase )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
""".split()
testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"] )
with patch.object(lowerCamelCase , "argv" , lowerCamelCase ):
with CaptureStdout() as cs:
run_search()
__lowercase = [" num_beams | length_penalty", model, "Best score args"]
__lowercase = ["Info"]
if "translation" in task:
expected_strings.append("bleu" )
else:
expected_strings.extend(lowerCamelCase )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(lowerCamelCase ).exists()
os.remove(Path(lowerCamelCase ) )
| 655 | 1 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
snake_case__ : List[Any] = _symbol_database.Default()
snake_case__ : List[Any] = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
snake_case__ : int = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
snake_case__ : Dict = None
snake_case__ : List[Any] = b"""H\003"""
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
snake_case__ : Any = 45
snake_case__ : Any = 15_81
snake_case__ : Any = 15_17
snake_case__ : Tuple = 15_70
snake_case__ : Dict = 15_84
snake_case__ : Any = 17_93
snake_case__ : Union[str, Any] = 17_95
snake_case__ : Dict = 19_16
snake_case__ : Any = 18_64
snake_case__ : Union[str, Any] = 19_05
snake_case__ : Optional[int] = 19_19
snake_case__ : Union[str, Any] = 24_29
snake_case__ : List[str] = 22_08
snake_case__ : Dict = 24_18
snake_case__ : Any = 23_23
snake_case__ : Optional[int] = 24_07
# @@protoc_insertion_point(module_scope)
| 655 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class _A :
'''simple docstring'''
_snake_case : int
_snake_case : TreeNode | None = None
_snake_case : TreeNode | None = None
snake_case__ : Dict = namedtuple("""CoinsDistribResult""", """moves excess""")
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
if root is None:
return 0
# Validation
def count_nodes(_SCREAMING_SNAKE_CASE ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(_SCREAMING_SNAKE_CASE ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(_SCREAMING_SNAKE_CASE ) != count_coins(_SCREAMING_SNAKE_CASE ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(_SCREAMING_SNAKE_CASE ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
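        # each subtree reports its coin surplus/deficit; every coin that must cross
        # an edge to or from the parent costs exactly one move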
__lowercase , __lowercase = get_distrib(node.left )
__lowercase , __lowercase = get_distrib(node.right )
__lowercase = 1 - left_distrib_excess
__lowercase = 1 - right_distrib_excess
__lowercase = (
left_distrib_moves
+ right_distrib_moves
+ abs(_SCREAMING_SNAKE_CASE )
+ abs(_SCREAMING_SNAKE_CASE )
)
__lowercase = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return get_distrib(_SCREAMING_SNAKE_CASE )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 655 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : Any = logging.get_logger(__name__)
snake_case__ : Optional[int] = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Optional[int] = """git_vision_model"""
def __init__( self : str , lowerCamelCase : Dict=768 , lowerCamelCase : List[str]=3_072 , lowerCamelCase : List[Any]=12 , lowerCamelCase : List[str]=12 , lowerCamelCase : str=3 , lowerCamelCase : Optional[int]=224 , lowerCamelCase : Optional[Any]=16 , lowerCamelCase : Dict="quick_gelu" , lowerCamelCase : Tuple=1e-5 , lowerCamelCase : int=0.0 , lowerCamelCase : int=0.02 , **lowerCamelCase : List[Any] , ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
__lowercase = hidden_size
__lowercase = intermediate_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = num_channels
__lowercase = patch_size
__lowercase = image_size
__lowercase = initializer_range
__lowercase = attention_dropout
__lowercase = layer_norm_eps
__lowercase = hidden_act
@classmethod
def _snake_case ( cls : Optional[Any] , lowerCamelCase : Union[str, os.PathLike] , **lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
cls._set_token_in_kwargs(lowerCamelCase )
__lowercase , __lowercase = cls.get_config_dict(lowerCamelCase , **lowerCamelCase )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("model_type" ) == "git":
__lowercase = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCamelCase , **lowerCamelCase )
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Optional[int] = """git"""
def __init__( self : List[str] , lowerCamelCase : List[Any]=None , lowerCamelCase : int=30_522 , lowerCamelCase : List[str]=768 , lowerCamelCase : Optional[Any]=6 , lowerCamelCase : Any=12 , lowerCamelCase : Union[str, Any]=3_072 , lowerCamelCase : List[str]="gelu" , lowerCamelCase : Dict=0.1 , lowerCamelCase : int=0.1 , lowerCamelCase : List[str]=1_024 , lowerCamelCase : Optional[int]=0.02 , lowerCamelCase : Any=1e-12 , lowerCamelCase : Any=0 , lowerCamelCase : Optional[int]="absolute" , lowerCamelCase : str=True , lowerCamelCase : Optional[int]=False , lowerCamelCase : Tuple=101 , lowerCamelCase : Union[str, Any]=102 , lowerCamelCase : List[Any]=None , **lowerCamelCase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , pad_token_id=lowerCamelCase , **lowerCamelCase )
if vision_config is None:
__lowercase = {}
logger.info("vision_config is None. initializing the GitVisionConfig with default values." )
__lowercase = GitVisionConfig(**lowerCamelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = use_cache
__lowercase = tie_word_embeddings
__lowercase = num_image_with_embedding
__lowercase = bos_token_id
__lowercase = eos_token_id
def _snake_case ( self : Dict ):
'''simple docstring'''
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = self.vision_config.to_dict()
__lowercase = self.__class__.model_type
return output
| 655 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = SwinvaConfig()
__lowercase = swinva_name.split("_" )
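    # timm names look like "swinv2_tiny_patch4_window8_256" (or "...window12to24_192to384..."
    # for fine-tuned variants); parse out model size, window size and input resolution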
__lowercase = name_split[1]
if "to" in name_split[3]:
__lowercase = int(name_split[3][-3:] )
else:
__lowercase = int(name_split[3] )
if "to" in name_split[2]:
__lowercase = int(name_split[2][-2:] )
else:
__lowercase = int(name_split[2][6:] )
if model_size == "tiny":
__lowercase = 9_6
__lowercase = (2, 2, 6, 2)
__lowercase = (3, 6, 1_2, 2_4)
elif model_size == "small":
__lowercase = 9_6
__lowercase = (2, 2, 1_8, 2)
__lowercase = (3, 6, 1_2, 2_4)
elif model_size == "base":
__lowercase = 1_2_8
__lowercase = (2, 2, 1_8, 2)
__lowercase = (4, 8, 1_6, 3_2)
else:
__lowercase = 1_9_2
__lowercase = (2, 2, 1_8, 2)
__lowercase = (6, 1_2, 2_4, 4_8)
if "to" in swinva_name:
__lowercase = (1_2, 1_2, 1_2, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
__lowercase = 2_1_8_4_1
__lowercase = "huggingface/label-files"
__lowercase = "imagenet-22k-id2label.json"
__lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
__lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
else:
__lowercase = 1_0_0_0
__lowercase = "huggingface/label-files"
__lowercase = "imagenet-1k-id2label.json"
__lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
__lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
__lowercase = img_size
__lowercase = num_classes
__lowercase = embed_dim
__lowercase = depths
__lowercase = num_heads
__lowercase = window_size
return config
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
if "patch_embed.proj" in name:
__lowercase = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
__lowercase = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
__lowercase = "encoder." + name
if "attn.proj" in name:
__lowercase = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
__lowercase = name.replace("attn" , "attention.self" )
if "norm1" in name:
__lowercase = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
__lowercase = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
__lowercase = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__lowercase = name.replace("mlp.fc2" , "output.dense" )
if "q_bias" in name:
__lowercase = name.replace("q_bias" , "query.bias" )
if "k_bias" in name:
__lowercase = name.replace("k_bias" , "key.bias" )
if "v_bias" in name:
__lowercase = name.replace("v_bias" , "value.bias" )
if "cpb_mlp" in name:
__lowercase = name.replace("cpb_mlp" , "continuous_position_bias_mlp" )
if name == "norm.weight":
__lowercase = "layernorm.weight"
if name == "norm.bias":
__lowercase = "layernorm.bias"
if "head" in name:
__lowercase = name.replace("head" , "classifier" )
else:
__lowercase = "swinv2." + name
return name
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for key in orig_state_dict.copy().keys():
__lowercase = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
if "mask" in key:
continue
elif "qkv" in key:
__lowercase = key.split("." )
__lowercase = int(key_split[1] )
__lowercase = int(key_split[3] )
__lowercase = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
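            # timm stores query/key/value as one fused `qkv` tensor; slice it into
            # equal thirds for the separate HF query/key/value projections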
if "weight" in key:
__lowercase = val[:dim, :]
__lowercase = val[dim : dim * 2, :]
__lowercase = val[-dim:, :]
else:
__lowercase = val[:dim]
__lowercase = val[
dim : dim * 2
]
__lowercase = val[-dim:]
else:
__lowercase = val
return orig_state_dict
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE )
timm_model.eval()
__lowercase = get_swinva_config(_SCREAMING_SNAKE_CASE )
__lowercase = SwinvaForImageClassification(_SCREAMING_SNAKE_CASE )
model.eval()
__lowercase = convert_state_dict(timm_model.state_dict() , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
__lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowercase = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_" , "-" ) ) )
__lowercase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
__lowercase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" )
__lowercase = timm_model(inputs["pixel_values"] )
__lowercase = model(**_SCREAMING_SNAKE_CASE ).logits
assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 )
print(F"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
model.push_to_hub(
repo_path_or_name=Path(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , organization="nandwalritik" , commit_message="Add model" , )
if __name__ == "__main__":
snake_case__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swinv2_name""",
default="""swinv2_tiny_patch4_window8_256""",
type=str,
help="""Name of the Swinv2 timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
snake_case__ : str = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 655 | 1 |
from ..utils import DummyObject, requires_backends
class _A ( metaclass=_lowercase ):
'''simple docstring'''
_snake_case : Any = ["""onnx"""]
def __init__( self : Union[str, Any] , *lowerCamelCase : Optional[int] , **lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["onnx"] )
@classmethod
def _snake_case ( cls : Optional[int] , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : str ):
'''simple docstring'''
requires_backends(cls , ["onnx"] )
@classmethod
def _snake_case ( cls : int , *lowerCamelCase : int , **lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["onnx"] )
| 655 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
snake_case__ : List[str] = logging.get_logger(__name__)
snake_case__ : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
snake_case__ : Optional[Any] = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
snake_case__ : List[str] = {
"""allenai/led-base-16384""": 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def snake_case_ ( ):
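    # build a byte -> unicode-character table: visibly printable bytes map to
    # themselves, the remaining bytes are assigned code points from 256 upward so
    # that every byte is represented by a single visible character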
__lowercase = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
__lowercase = bs[:]
__lowercase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_SCREAMING_SNAKE_CASE )
cs.append(2**8 + n )
n += 1
__lowercase = [chr(_SCREAMING_SNAKE_CASE ) for n in cs]
return dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
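    # return the set of adjacent symbol pairs in `word`; these are the candidate
    # positions for the next BPE merge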
__lowercase = set()
__lowercase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowercase = char
return pairs
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : List[str] = VOCAB_FILES_NAMES
_snake_case : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Union[str, Any] = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , lowerCamelCase : Tuple , lowerCamelCase : Tuple , lowerCamelCase : Optional[int]="replace" , lowerCamelCase : Dict="<s>" , lowerCamelCase : Dict="</s>" , lowerCamelCase : Optional[Any]="</s>" , lowerCamelCase : Any="<s>" , lowerCamelCase : List[str]="<unk>" , lowerCamelCase : Union[str, Any]="<pad>" , lowerCamelCase : Any="<mask>" , lowerCamelCase : str=False , **lowerCamelCase : Optional[Any] , ):
'''simple docstring'''
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else bos_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else eos_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else sep_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else cls_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else unk_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else pad_token
# Mask token behaves like a normal word, i.e. includes the space before it
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
super().__init__(
errors=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , )
with open(lowerCamelCase , encoding="utf-8" ) as vocab_handle:
__lowercase = json.load(lowerCamelCase )
__lowercase = {v: k for k, v in self.encoder.items()}
__lowercase = errors # how to handle errors in decoding
__lowercase = bytes_to_unicode()
__lowercase = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase , encoding="utf-8" ) as merges_handle:
__lowercase = merges_handle.read().split("\n" )[1:-1]
__lowercase = [tuple(merge.split() ) for merge in bpe_merges]
__lowercase = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
__lowercase = {}
__lowercase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__lowercase = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
return len(self.encoder )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self : List[Any] , lowerCamelCase : str ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
__lowercase = tuple(lowerCamelCase )
__lowercase = get_pairs(lowerCamelCase )
if not pairs:
return token
while True:
__lowercase = min(lowerCamelCase , key=lambda lowerCamelCase : self.bpe_ranks.get(lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__lowercase , __lowercase = bigram
__lowercase = []
__lowercase = 0
while i < len(lowerCamelCase ):
try:
__lowercase = word.index(lowerCamelCase , lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowercase = j
if word[i] == first and i < len(lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowercase = tuple(lowerCamelCase )
__lowercase = new_word
if len(lowerCamelCase ) == 1:
break
else:
__lowercase = get_pairs(lowerCamelCase )
__lowercase = " ".join(lowerCamelCase )
__lowercase = word
return word
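    # A worked example of the merge loop above (a sketch, not tied to the real
    # LED merge table): starting from the symbols ("h", "u", "g", "s") with a
    # rank table where ("h", "u") ranks lowest, the loop first merges to
    # ("hu", "g", "s"), then, if ("hu", "g") is ranked, to ("hug", "s"), and
    # stops once no remaining pair appears in self.bpe_ranks. The space-joined
    # result is cached per token before being returned.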
def _snake_case ( self : List[Any] , lowerCamelCase : Tuple ):
'''simple docstring'''
__lowercase = []
for token in re.findall(self.pat , lowerCamelCase ):
__lowercase = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase ).split(" " ) )
return bpe_tokens
def _snake_case ( self : Dict , lowerCamelCase : Optional[int] ):
'''simple docstring'''
return self.encoder.get(lowerCamelCase , self.encoder.get(self.unk_token ) )
def _snake_case ( self : str , lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return self.decoder.get(lowerCamelCase )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : int ):
'''simple docstring'''
__lowercase = "".join(lowerCamelCase )
__lowercase = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _snake_case ( self : Optional[Any] , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase = os.path.join(
lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__lowercase = os.path.join(
lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase , ensure_ascii=lowerCamelCase ) + "\n" )
__lowercase = 0
with open(lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
__lowercase = token_index
writer.write(" ".join(lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def _snake_case ( self : Tuple , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowercase = [self.cls_token_id]
__lowercase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
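    # The resulting layouts (following the RoBERTa-style convention this
    # tokenizer inherits):
    #
    #     single sequence:   <s> A </s>
    #     pair of sequences: <s> A </s></s> B </s>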
def _snake_case ( self : str , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase )) + [1]
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]
def _snake_case ( self : int , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self : Dict , lowerCamelCase : Any , lowerCamelCase : Tuple=False , **lowerCamelCase : Any ):
'''simple docstring'''
__lowercase = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase ) > 0 and not text[0].isspace()):
__lowercase = " " + text
return (text, kwargs)
def _snake_case ( self : List[Any] , lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase : Optional[int] = None , lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , ):
'''simple docstring'''
__lowercase = super()._pad(
encoded_inputs=lowerCamelCase , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
__lowercase = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__lowercase = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as other (sequential) inputs.
__lowercase = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase )
if needs_to_be_padded:
__lowercase = len(lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__lowercase = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
__lowercase = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
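    # A minimal sketch of the branch above (values hypothetical): with
    # padding_side == "right" and two positions of padding needed,
    #
    #     encoded_inputs["global_attention_mask"] == [1, 0, 0]
    #
    # becomes [1, 0, 0, -1, -1], so padded positions are marked -1 rather
    # than 0 (which would mean "local attention", not "do not attend").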
| 655 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
'''simple docstring'''
@slow
def _snake_case ( self : Any ):
'''simple docstring'''
__lowercase = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
__lowercase = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]] , dtype=tf.intaa , )  # "J'aime le camembert !"
__lowercase = model(lowerCamelCase )["last_hidden_state"]
__lowercase = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , lowerCamelCase )
# compare the actual values for a slice.
__lowercase = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 655 |
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
    if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
        raise ValueError("The length of profit and weight must be the same." )
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero." )
    if any(p < 0 for p in profit ):
        raise ValueError("Profit cannot be negative." )
    if any(w < 0 for w in weight ):
        raise ValueError("Weight cannot be negative." )
    # Compute the profit per unit of weight (profit / weight) for each item.
__lowercase = [p / w for p, w in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]
# Creating a copy of the list and sorting profit/weight in ascending order
__lowercase = sorted(_SCREAMING_SNAKE_CASE )
# declaring useful variables
__lowercase = len(_SCREAMING_SNAKE_CASE )
__lowercase = 0
__lowercase = 0
__lowercase = 0
    # Loop until the sack reaches max_weight or every item has been considered.
while limit <= max_weight and i < length:
        # pick the item with the largest remaining profit/weight ratio
__lowercase = sorted_profit_by_weight[length - i - 1]
__lowercase = profit_by_weight.index(_SCREAMING_SNAKE_CASE )
__lowercase = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
            # Take the whole item: the fraction used is weight[index] / weight[index] == 1
gain += 1 * profit[index]
else:
            # The item does not fit entirely, so take only the remaining
            # capacity: fraction used = (max_weight - limit) / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
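# A quick worked example (a sketch, using the call-site name `calc_profit`
# from the __main__ block below): with profits [10, 20, 30], weights
# [5, 4, 6] and max_weight 10, the ratios are [2, 5, 5]; the two ratio-5
# items (weights 4 + 6) fill the sack exactly, so
#
#     calc_profit([10, 20, 30], [5, 4, 6], 10)  # == 50.0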
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
snake_case__ : str = [int(x) for x in input("""Input profits separated by spaces: """).split()]
snake_case__ : str = [int(x) for x in input("""Input weights separated by spaces: """).split()]
snake_case__ : Optional[Any] = int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight)
| 655 | 1 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
snake_case__ : Tuple = logging.getLogger(__name__)
snake_case__ : Any = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
snake_case__ : Any = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _A :
'''simple docstring'''
_snake_case : Optional[str] = field(
default=_lowercase , metadata={
"""help""": (
"""The model checkpoint for weights initialization. Leave None if you want to train a model from"""
""" scratch."""
)
} , )
_snake_case : Optional[str] = field(
default=_lowercase , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(_lowercase )} , )
_snake_case : Optional[str] = field(
default=_lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_snake_case : Optional[str] = field(
default=_lowercase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_snake_case : Optional[str] = field(
default=_lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class _A :
'''simple docstring'''
_snake_case : Optional[str] = field(
default=_lowercase , metadata={"""help""": """The input training data file (a text file)."""} )
_snake_case : Optional[str] = field(
default=_lowercase , metadata={
"""help""": (
"""The input training data files (multiple files in glob format). """
"""Very often splitting large files to smaller files can prevent tokenizer going out of memory"""
)
} , )
_snake_case : Optional[str] = field(
default=_lowercase , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
_snake_case : Optional[str] = field(
default=_lowercase , metadata={"""help""": """An optional input train ref data file for whole word mask in Chinese."""} , )
_snake_case : Optional[str] = field(
default=_lowercase , metadata={"""help""": """An optional input eval ref data file for whole word mask in Chinese."""} , )
_snake_case : bool = field(
default=_lowercase , metadata={"""help""": """Whether distinct lines of text in the dataset are to be handled as distinct sequences."""} , )
_snake_case : bool = field(
default=_lowercase , metadata={"""help""": """Train with masked-language modeling loss instead of language modeling."""} )
_snake_case : bool = field(default=_lowercase , metadata={"""help""": """Whether ot not to use whole word mask."""} )
_snake_case : float = field(
default=0.1_5 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
_snake_case : float = field(
default=1 / 6 , metadata={
"""help""": (
"""Ratio of length of a span of masked tokens to surrounding context length for permutation language"""
""" modeling."""
)
} , )
_snake_case : int = field(
default=5 , metadata={"""help""": """Maximum length of a span of masked tokens for permutation language modeling."""} )
_snake_case : int = field(
default=-1 , metadata={
"""help""": (
"""Optional input sequence length after tokenization."""
"""The training dataset will be truncated in block of this size for training."""
"""Default to the model max input length for single sentence inputs (take into account special tokens)."""
)
} , )
_snake_case : bool = field(
default=_lowercase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , ):
def _dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask" )
return LineByLineWithRefDataset(
tokenizer=_SCREAMING_SNAKE_CASE , file_path=_SCREAMING_SNAKE_CASE , block_size=args.block_size , ref_path=_SCREAMING_SNAKE_CASE , )
return LineByLineTextDataset(tokenizer=_SCREAMING_SNAKE_CASE , file_path=_SCREAMING_SNAKE_CASE , block_size=args.block_size )
else:
return TextDataset(
tokenizer=_SCREAMING_SNAKE_CASE , file_path=_SCREAMING_SNAKE_CASE , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=_SCREAMING_SNAKE_CASE , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(_SCREAMING_SNAKE_CASE ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
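# Dispatch summary for get_dataset above: --line_by_line selects the
# line-based datasets (with an optional whole-word-mask reference file for
# Chinese), multiple --train_data_files globs are concatenated into one
# ConcatDataset, and everything else falls back to the block-wise TextDataset.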
def snake_case_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument." )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , _SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__lowercase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__lowercase = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.tokenizer_name:
__lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
" script, save it,and load it from here, using --tokenizer_name" )
if model_args.model_name_or_path:
__lowercase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , )
else:
logger.info("Training new model from scratch" )
__lowercase = AutoModelWithLMHead.from_config(_SCREAMING_SNAKE_CASE )
model.resize_token_embeddings(len(_SCREAMING_SNAKE_CASE ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
"--mlm flag (masked language modeling)." )
if data_args.block_size <= 0:
__lowercase = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
__lowercase = min(data_args.block_size , tokenizer.max_len )
# Get datasets
__lowercase = (
get_dataset(_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__lowercase = (
get_dataset(_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , evaluate=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
__lowercase = DataCollatorForPermutationLanguageModeling(
tokenizer=_SCREAMING_SNAKE_CASE , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__lowercase = DataCollatorForWholeWordMask(
tokenizer=_SCREAMING_SNAKE_CASE , mlm_probability=data_args.mlm_probability )
else:
__lowercase = DataCollatorForLanguageModeling(
tokenizer=_SCREAMING_SNAKE_CASE , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowercase = Trainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , prediction_loss_only=_SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
__lowercase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=_SCREAMING_SNAKE_CASE )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
__lowercase = trainer.evaluate()
__lowercase = math.exp(eval_output["eval_loss"] )
__lowercase = {"perplexity": perplexity}
__lowercase = os.path.join(training_args.output_dir , "eval_results_lm.txt" )
if trainer.is_world_master():
with open(_SCREAMING_SNAKE_CASE , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , _SCREAMING_SNAKE_CASE , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
results.update(_SCREAMING_SNAKE_CASE )
return results
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 655 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Dict = """openai/whisper-base"""
_snake_case : Union[str, Any] = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
_snake_case : Any = """transcriber"""
_snake_case : Any = WhisperProcessor
_snake_case : Optional[int] = WhisperForConditionalGeneration
_snake_case : str = ["""audio"""]
_snake_case : Optional[int] = ["""text"""]
def _snake_case ( self : List[str] , lowerCamelCase : Optional[int] ):
'''simple docstring'''
return self.pre_processor(lowerCamelCase , return_tensors="pt" ).input_features
def _snake_case ( self : str , lowerCamelCase : List[Any] ):
'''simple docstring'''
return self.model.generate(inputs=lowerCamelCase )
def _snake_case ( self : List[str] , lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return self.pre_processor.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )[0]
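# Typical usage (a sketch; the class name and input are assumptions, since
# PipelineTool instances are called like functions and run encode -> forward
# -> decode in order):
#
#     tool = SpeechToTextTool()          # name assumed from the original source
#     text = tool(raw_audio)             # `raw_audio` is a hypothetical waveform input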
| 655 | 1 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class _A :
'''simple docstring'''
_snake_case : int
_snake_case : TreeNode | None = None
_snake_case : TreeNode | None = None
snake_case__ : Dict = namedtuple("""CoinsDistribResult""", """moves excess""")
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
if root is None:
return 0
# Validation
def count_nodes(_SCREAMING_SNAKE_CASE ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(_SCREAMING_SNAKE_CASE ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(_SCREAMING_SNAKE_CASE ) != count_coins(_SCREAMING_SNAKE_CASE ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(_SCREAMING_SNAKE_CASE ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
__lowercase , __lowercase = get_distrib(node.left )
__lowercase , __lowercase = get_distrib(node.right )
__lowercase = 1 - left_distrib_excess
__lowercase = 1 - right_distrib_excess
__lowercase = (
left_distrib_moves
+ right_distrib_moves
+ abs(_SCREAMING_SNAKE_CASE )
+ abs(_SCREAMING_SNAKE_CASE )
)
__lowercase = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return get_distrib(_SCREAMING_SNAKE_CASE )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
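# A worked example (a sketch; `distribute_coins` is the assumed public name
# of the function above): a root holding 3 coins with two empty children
# needs one move per child, i.e. 2 moves in total.
#
#     root = TreeNode(3, TreeNode(0), TreeNode(0))
#     distribute_coins(root)  # == 2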
| 655 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class _A :
'''simple docstring'''
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
__lowercase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
__lowercase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _snake_case ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
__lowercase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
__lowercase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
__lowercase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = inputs["prompt"]
__lowercase = inputs["generator"]
__lowercase = inputs["num_inference_steps"]
__lowercase = inputs["output_type"]
if "image" in inputs:
__lowercase = inputs["image"]
else:
__lowercase = None
if "mask_image" in inputs:
__lowercase = inputs["mask_image"]
else:
__lowercase = None
if "original_image" in inputs:
__lowercase = inputs["original_image"]
else:
__lowercase = None
__lowercase , __lowercase = pipe.encode_prompt(lowerCamelCase )
# inputs with prompt converted to embeddings
__lowercase = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
__lowercase = image
if mask_image is not None:
__lowercase = mask_image
if original_image is not None:
__lowercase = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(lowerCamelCase , lowerCamelCase , lowerCamelCase )
__lowercase = pipe(**lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCamelCase )
__lowercase = self.pipeline_class.from_pretrained(lowerCamelCase )
pipe_loaded.to(lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=lowerCamelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowerCamelCase , lowerCamelCase ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , )
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = inputs["generator"]
__lowercase = inputs["num_inference_steps"]
__lowercase = inputs["output_type"]
# inputs with prompt converted to embeddings
__lowercase = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
__lowercase = image
if mask_image is not None:
__lowercase = mask_image
if original_image is not None:
__lowercase = original_image
__lowercase = pipe_loaded(**lowerCamelCase )[0]
__lowercase = np.abs(to_np(lowerCamelCase ) - to_np(lowerCamelCase ) ).max()
self.assertLess(lowerCamelCase , 1e-4 )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = pipe(**lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCamelCase )
__lowercase = self.pipeline_class.from_pretrained(lowerCamelCase )
pipe_loaded.to(lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=lowerCamelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = pipe_loaded(**lowerCamelCase )[0]
__lowercase = np.abs(to_np(lowerCamelCase ) - to_np(lowerCamelCase ) ).max()
self.assertLess(lowerCamelCase , 1e-4 )
| 655 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
snake_case__ : List[str] = {
"""configuration_poolformer""": [
"""POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""PoolFormerConfig""",
"""PoolFormerOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Tuple = ["""PoolFormerFeatureExtractor"""]
snake_case__ : Optional[int] = ["""PoolFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : int = [
"""POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PoolFormerForImageClassification""",
"""PoolFormerModel""",
"""PoolFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
snake_case__ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 655 |
import numpy as np
snake_case__ : Tuple = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
class _A :
'''simple docstring'''
def __init__( self : Dict ):
'''simple docstring'''
__lowercase = np.array(lowerCamelCase )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : str ):
'''simple docstring'''
__lowercase , __lowercase = np.where(letter == self.SQUARE )
__lowercase = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def _snake_case ( self : List[Any] , lowerCamelCase : int , lowerCamelCase : int ):
'''simple docstring'''
__lowercase = self.SQUARE[indexa - 1, indexa - 1]
return letter
def _snake_case ( self : int , lowerCamelCase : str ):
'''simple docstring'''
__lowercase = message.lower()
__lowercase = message.replace(" " , "" )
__lowercase = message.replace("j" , "i" )
__lowercase = np.empty((2, len(lowerCamelCase )) )
for letter_index in range(len(lowerCamelCase ) ):
__lowercase = self.letter_to_numbers(message[letter_index] )
__lowercase = numbers[0]
__lowercase = numbers[1]
__lowercase = first_step.reshape(2 * len(lowerCamelCase ) )
__lowercase = ""
for numbers_index in range(len(lowerCamelCase ) ):
__lowercase = int(second_step[numbers_index * 2] )
__lowercase = int(second_step[(numbers_index * 2) + 1] )
__lowercase = self.numbers_to_letter(lowerCamelCase , lowerCamelCase )
__lowercase = encoded_message + letter
return encoded_message
def _snake_case ( self : Optional[Any] , lowerCamelCase : str ):
'''simple docstring'''
__lowercase = message.lower()
message.replace(" " , "" )
__lowercase = np.empty(2 * len(lowerCamelCase ) )
for letter_index in range(len(lowerCamelCase ) ):
__lowercase = self.letter_to_numbers(message[letter_index] )
__lowercase = numbers[0]
__lowercase = numbers[1]
__lowercase = first_step.reshape((2, len(lowerCamelCase )) )
__lowercase = ""
for numbers_index in range(len(lowerCamelCase ) ):
__lowercase = int(second_step[0, numbers_index] )
__lowercase = int(second_step[1, numbers_index] )
__lowercase = self.numbers_to_letter(lowerCamelCase , lowerCamelCase )
__lowercase = decoded_message + letter
return decoded_message
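# Round-trip usage (a sketch; the class and method names are assumed from the
# original source, since this file obfuscates them):
#
#     cipher = PolybiusCipher()
#     ciphertext = cipher.encode("test message")
#     cipher.decode(ciphertext)  # == "testmessage" (spaces dropped, "j" folded into "i")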
| 655 | 1 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
snake_case__ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
snake_case__ : List[Any] = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=8 ):
__lowercase = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__lowercase = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
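# A quick check of the arithmetic above (a sketch, using the call-site name
# `downscale_height_and_width` seen later in this file): with the MOVQ scale
# factor of 8, sizes are divided by 8**2 = 64, rounded up, then multiplied
# back by 8.
#
#     downscale_height_and_width(768, 768, scale_factor=8)  # == (96, 96)
#     downscale_height_and_width(570, 570, scale_factor=8)  # == (72, 72), since ceil(570 / 64) * 8 == 72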
class _A ( _lowercase ):
'''simple docstring'''
def __init__( self : Optional[int] , lowerCamelCase : UNetaDConditionModel , lowerCamelCase : DDPMScheduler , lowerCamelCase : VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=lowerCamelCase , scheduler=lowerCamelCase , movq=lowerCamelCase , )
__lowercase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _snake_case ( self : Tuple , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : Tuple , lowerCamelCase : List[Any] , lowerCamelCase : List[Any] ):
'''simple docstring'''
if latents is None:
__lowercase = randn_tensor(lowerCamelCase , generator=lowerCamelCase , device=lowerCamelCase , dtype=lowerCamelCase )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
__lowercase = latents.to(lowerCamelCase )
__lowercase = latents * scheduler.init_noise_sigma
return latents
def _snake_case ( self : List[Any] , lowerCamelCase : List[Any]=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
__lowercase = torch.device(f"""cuda:{gpu_id}""" )
__lowercase = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCamelCase , lowerCamelCase )
def _snake_case ( self : Tuple , lowerCamelCase : Any=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
__lowercase = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=lowerCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__lowercase = None
for cpu_offloaded_model in [self.unet, self.movq]:
__lowercase , __lowercase = cpu_offload_with_hook(lowerCamelCase , lowerCamelCase , prev_module_hook=lowerCamelCase )
# We'll offload the last model manually.
__lowercase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _snake_case ( self : List[Any] ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCamelCase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCamelCase )
def __call__( self : Any , lowerCamelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCamelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCamelCase : int = 512 , lowerCamelCase : int = 512 , lowerCamelCase : int = 100 , lowerCamelCase : float = 4.0 , lowerCamelCase : int = 1 , lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase : Optional[torch.FloatTensor] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , ):
'''simple docstring'''
__lowercase = self._execution_device
__lowercase = guidance_scale > 1.0
if isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase = torch.cat(lowerCamelCase , dim=0 )
__lowercase = image_embeds.shape[0] * num_images_per_prompt
if isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase = torch.cat(lowerCamelCase , dim=0 )
if do_classifier_free_guidance:
__lowercase = image_embeds.repeat_interleave(lowerCamelCase , dim=0 )
__lowercase = negative_image_embeds.repeat_interleave(lowerCamelCase , dim=0 )
__lowercase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase )
self.scheduler.set_timesteps(lowerCamelCase , device=lowerCamelCase )
__lowercase = self.scheduler.timesteps
__lowercase = self.unet.config.in_channels
__lowercase , __lowercase = downscale_height_and_width(lowerCamelCase , lowerCamelCase , self.movq_scale_factor )
# create initial latent
__lowercase = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowerCamelCase , lowerCamelCase , lowerCamelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
__lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowercase = {"image_embeds": image_embeds}
__lowercase = self.unet(
sample=lowerCamelCase , timestep=lowerCamelCase , encoder_hidden_states=lowerCamelCase , added_cond_kwargs=lowerCamelCase , return_dict=lowerCamelCase , )[0]
if do_classifier_free_guidance:
__lowercase , __lowercase = noise_pred.split(latents.shape[1] , dim=1 )
__lowercase , __lowercase = noise_pred.chunk(2 )
__lowercase , __lowercase = variance_pred.chunk(2 )
__lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__lowercase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__lowercase , __lowercase = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__lowercase = self.scheduler.step(
lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase , )[0]
# post-processing
__lowercase = self.movq.decode(lowerCamelCase , force_not_quantize=lowerCamelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
__lowercase = image * 0.5 + 0.5
__lowercase = image.clamp(0 , 1 )
__lowercase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowercase = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase )
| 655 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class _A ( ctypes.Structure ):
'''simple docstring'''
_snake_case : Optional[Any] = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)]
def snake_case_ ( ):
if os.name == "nt":
__lowercase = CursorInfo()
__lowercase = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) )
__lowercase = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) )
elif os.name == "posix":
sys.stdout.write("\033[?25l" )
sys.stdout.flush()
def snake_case_ ( ):
if os.name == "nt":
__lowercase = CursorInfo()
__lowercase = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) )
__lowercase = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) )
elif os.name == "posix":
sys.stdout.write("\033[?25h" )
sys.stdout.flush()
@contextmanager
def snake_case_ ( ):
try:
hide_cursor()
yield
finally:
show_cursor()
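# Intended usage (a sketch; `hide_cursor_context` below is a hypothetical
# alias for the unnamed context manager above): the cursor is hidden for the
# duration of the block and restored even if the body raises.
#
#     with hide_cursor_context():
#         render_progress_bar()  # hypothetical long-running console task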
| 655 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Union[str, Any] = """naver-clova-ix/donut-base-finetuned-docvqa"""
_snake_case : List[Any] = (
"""This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
"""should be the document containing the information, as well as a `question` that is the question about the """
"""document. It returns a text that contains the answer to the question."""
)
_snake_case : Tuple = """document_qa"""
_snake_case : Tuple = AutoProcessor
_snake_case : int = VisionEncoderDecoderModel
_snake_case : Dict = ["""image""", """text"""]
_snake_case : Any = ["""text"""]
def __init__( self : Tuple , *lowerCamelCase : int , **lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if not is_vision_available():
raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool." )
super().__init__(*lowerCamelCase , **lowerCamelCase )
def _snake_case ( self : List[Any] , lowerCamelCase : "Image" , lowerCamelCase : str ):
'''simple docstring'''
__lowercase = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
__lowercase = task_prompt.replace("{user_input}" , lowerCamelCase )
__lowercase = self.pre_processor.tokenizer(
lowerCamelCase , add_special_tokens=lowerCamelCase , return_tensors="pt" ).input_ids
__lowercase = self.pre_processor(lowerCamelCase , return_tensors="pt" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def _snake_case ( self : List[str] , lowerCamelCase : int ):
'''simple docstring'''
return self.model.generate(
inputs["pixel_values"].to(self.device ) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=lowerCamelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=lowerCamelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=lowerCamelCase , ).sequences
def _snake_case ( self : str , lowerCamelCase : List[Any] ):
'''simple docstring'''
__lowercase = self.pre_processor.batch_decode(lowerCamelCase )[0]
__lowercase = sequence.replace(self.pre_processor.tokenizer.eos_token , "" )
__lowercase = sequence.replace(self.pre_processor.tokenizer.pad_token , "" )
__lowercase = re.sub(R"<.*?>" , "" , lowerCamelCase , count=1 ).strip() # remove first task start token
__lowercase = self.pre_processor.tokenajson(lowerCamelCase )
return sequence["answer"]
| 655 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case__ : List[Any] = logging.get_logger(__name__)
snake_case__ : List[str] = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : List[Any] = """yolos"""
def __init__( self : Union[str, Any] , lowerCamelCase : Union[str, Any]=768 , lowerCamelCase : int=12 , lowerCamelCase : Union[str, Any]=12 , lowerCamelCase : Optional[Any]=3_072 , lowerCamelCase : Optional[int]="gelu" , lowerCamelCase : Dict=0.0 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : Any=0.02 , lowerCamelCase : Optional[Any]=1e-12 , lowerCamelCase : Optional[Any]=[512, 864] , lowerCamelCase : str=16 , lowerCamelCase : Dict=3 , lowerCamelCase : str=True , lowerCamelCase : List[Any]=100 , lowerCamelCase : Dict=True , lowerCamelCase : Dict=False , lowerCamelCase : List[str]=1 , lowerCamelCase : str=5 , lowerCamelCase : Any=2 , lowerCamelCase : str=5 , lowerCamelCase : Optional[int]=2 , lowerCamelCase : List[Any]=0.1 , **lowerCamelCase : List[Any] , ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = qkv_bias
__lowercase = num_detection_tokens
__lowercase = use_mid_position_embeddings
__lowercase = auxiliary_loss
# Hungarian matcher
__lowercase = class_cost
__lowercase = bbox_cost
__lowercase = giou_cost
# Loss coefficients
__lowercase = bbox_loss_coefficient
__lowercase = giou_loss_coefficient
__lowercase = eos_coefficient
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Dict = version.parse("""1.11""" )
@property
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _snake_case ( self : str ):
'''simple docstring'''
return 1e-4
@property
def _snake_case ( self : Tuple ):
'''simple docstring'''
return 12
| 655 | 1 |
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = len(set_a.intersection(_SCREAMING_SNAKE_CASE ) )
if alternative_union:
__lowercase = len(_SCREAMING_SNAKE_CASE ) + len(_SCREAMING_SNAKE_CASE )
else:
__lowercase = len(set_a.union(_SCREAMING_SNAKE_CASE ) )
return intersection / union
if isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ):
__lowercase = [element for element in set_a if element in set_b]
if alternative_union:
__lowercase = len(_SCREAMING_SNAKE_CASE ) + len(_SCREAMING_SNAKE_CASE )
return len(_SCREAMING_SNAKE_CASE ) / union
else:
__lowercase = set_a + [element for element in set_b if element not in set_a]
return len(_SCREAMING_SNAKE_CASE ) / len(_SCREAMING_SNAKE_CASE )
return None
if __name__ == "__main__":
snake_case__ : Dict = {"""a""", """b""", """c""", """d""", """e"""}
snake_case__ : Any = {"""c""", """d""", """e""", """f""", """h""", """i"""}
print(jaccard_similarity(set_a, set_b))
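# Expected output for the sets above: |{c, d, e}| / |{a, b, c, d, e, f, h, i}| = 3 / 8 = 0.375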
| 655 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : Optional[int] = logging.get_logger(__name__)
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = MobileNetVaConfig(layer_norm_eps=0.001 )
if "_quant" in model_name:
raise ValueError("Quantized models are not supported." )
__lowercase = re.match(R"^mobilenet_v1_([^_]*)_([^_]*)$" , _SCREAMING_SNAKE_CASE )
if matches:
__lowercase = float(matches[1] )
__lowercase = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__lowercase = 1001
__lowercase = "imagenet-1k-id2label.json"
__lowercase = "huggingface/label-files"
__lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
__lowercase = {int(_SCREAMING_SNAKE_CASE ) + 1: v for k, v in idalabel.items()}
__lowercase = "background"
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
return config
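# Example (a sketch): "mobilenet_v1_0.75_192" parses to depth_multiplier 0.75
# and image_size 192; the extra "background" class at index 0 is why
# num_labels is 1001 rather than 1000.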
def snake_case_ ( ):
__lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowercase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
__lowercase = get_mobilenet_va_config(_SCREAMING_SNAKE_CASE )
# Load 🤗 model
__lowercase = MobileNetVaForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__lowercase = MobileNetVaImageProcessor(
crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 3_2} , )
__lowercase = image_processor(images=prepare_img() , return_tensors="pt" )
__lowercase = model(**_SCREAMING_SNAKE_CASE )
__lowercase = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
__lowercase = torch.tensor([-4.1739, -1.1233, 3.1205] )
elif model_name == "mobilenet_v1_0.75_192":
__lowercase = torch.tensor([-3.9440, -2.3141, -0.3333] )
else:
__lowercase = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
print("Pushing to the hub..." )
__lowercase = "google/" + model_name
image_processor.push_to_hub(_SCREAMING_SNAKE_CASE )
model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
snake_case__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
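# Illustrative invocation (not part of the original script; the script name,
# checkpoint and output paths below are hypothetical):
#
#   python convert_mobilenet_v1.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./converted_mobilenet_v1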
| 655 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : Optional[int] = logging.get_logger(__name__)
snake_case__ : Optional[Any] = {
"""tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
"""https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36_000,
        max_position_embeddings=1_280,
        d_model=1_024,
        d_ff=8_192,
        d_ext=4_096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35_998,
        pad_token_id=35_995,
        eos_token_id=35_999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
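# The `attribute_map` above lets generic names resolve to model-specific ones,
# e.g. `config.hidden_size` returns `config.d_model`. A minimal standalone
# sketch of that aliasing mechanism (an assumption about how PretrainedConfig
# resolves aliases, shown for illustration only):
class _AttributeAliasDemo:
    attribute_map = {"hidden_size": "d_model"}

    def __init__(self) -> None:
        self.d_model = 1_024

    def __getattr__(self, name: str):
        # __getattr__ only runs when normal lookup fails, so real attributes
        # are untouched and aliased names fall through to their targets.
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)


assert _AttributeAliasDemo().hidden_size == 1_024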
| 655 |
from __future__ import annotations
from typing import Any
class Graph:
    """Data structure and Borůvka's algorithm for finding a minimum spanning tree."""

    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge in the format [first, second, edge weight] to the graph."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Returns the root node of the component that u_node belongs to."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Re-points every node at the root of its component."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Merges the components of u_node and v_node, attaching the smaller
        component to the larger one."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Performs Borůvka's algorithm and prints the MST it finds."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge

                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    # Track the cheapest edge leaving each of the two components.
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge

                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"""The total weight of the minimal spanning tree is: {mst_weight}""")
def test_vector() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
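# Example run (illustrative): build a small weighted graph and extract its MST.
if __name__ == "__main__":
    demo_graph = Graph(5)
    for u_node, v_node, weight in [(0, 1, 4), (0, 2, 3), (1, 2, 2), (1, 3, 5), (2, 3, 7), (3, 4, 6)]:
        demo_graph.add_edge(u_node, v_node, weight)
    demo_graph.boruvka()  # prints each added edge and the total MST weight (16 for this graph)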
| 655 | 1 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class _A ( _lowercase , _lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self : Dict , lowerCamelCase : int = 128 , lowerCamelCase : int = 256 , lowerCamelCase : float = 2000.0 , lowerCamelCase : int = 768 , lowerCamelCase : int = 12 , lowerCamelCase : int = 12 , lowerCamelCase : int = 64 , lowerCamelCase : int = 2_048 , lowerCamelCase : float = 0.1 , ):
'''simple docstring'''
super().__init__()
__lowercase = nn.Sequential(
nn.Linear(lowerCamelCase , d_model * 4 , bias=lowerCamelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowerCamelCase ) , nn.SiLU() , )
__lowercase = nn.Embedding(lowerCamelCase , lowerCamelCase )
__lowercase = False
__lowercase = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase )
__lowercase = nn.Dropout(p=lowerCamelCase )
__lowercase = nn.ModuleList()
for lyr_num in range(lowerCamelCase ):
# FiLM conditional T5 decoder
__lowercase = DecoderLayer(d_model=lowerCamelCase , d_kv=lowerCamelCase , num_heads=lowerCamelCase , d_ff=lowerCamelCase , dropout_rate=lowerCamelCase )
self.decoders.append(lowerCamelCase )
__lowercase = TaLayerNorm(lowerCamelCase )
__lowercase = nn.Dropout(p=lowerCamelCase )
__lowercase = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowercase = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def _snake_case ( self : List[Any] , lowerCamelCase : Any , lowerCamelCase : List[Any] , lowerCamelCase : str ):
'''simple docstring'''
__lowercase , __lowercase , __lowercase = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
__lowercase = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
__lowercase = self.conditioning_emb(lowerCamelCase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
__lowercase = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
__lowercase = torch.broadcast_to(
torch.arange(lowerCamelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
__lowercase = self.position_encoding(lowerCamelCase )
__lowercase = self.continuous_inputs_projection(lowerCamelCase )
inputs += position_encodings
__lowercase = self.dropout(lowerCamelCase )
# decoder: No padding present.
__lowercase = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
__lowercase = [(x, self.encoder_decoder_mask(lowerCamelCase , lowerCamelCase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
__lowercase = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
__lowercase = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
__lowercase = lyr(
lowerCamelCase , conditioning_emb=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , )[0]
__lowercase = self.decoder_norm(lowerCamelCase )
__lowercase = self.post_dropout(lowerCamelCase )
__lowercase = self.spec_out(lowerCamelCase )
return spec_out
class _A ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : int , lowerCamelCase : str , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any]=1e-6 ):
'''simple docstring'''
super().__init__()
__lowercase = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=lowerCamelCase , d_kv=lowerCamelCase , num_heads=lowerCamelCase , dropout_rate=lowerCamelCase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=lowerCamelCase , d_kv=lowerCamelCase , num_heads=lowerCamelCase , dropout_rate=lowerCamelCase , layer_norm_epsilon=lowerCamelCase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=lowerCamelCase , d_ff=lowerCamelCase , dropout_rate=lowerCamelCase , layer_norm_epsilon=lowerCamelCase ) )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Any=None , lowerCamelCase : List[str]=None , lowerCamelCase : List[Any]=None , lowerCamelCase : int=None , lowerCamelCase : List[str]=None , ):
'''simple docstring'''
__lowercase = self.layer[0](
lowerCamelCase , conditioning_emb=lowerCamelCase , attention_mask=lowerCamelCase , )
if encoder_hidden_states is not None:
__lowercase = torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to(
encoder_hidden_states.dtype )
__lowercase = self.layer[1](
lowerCamelCase , key_value_states=lowerCamelCase , attention_mask=lowerCamelCase , )
# Apply Film Conditional Feed Forward layer
__lowercase = self.layer[-1](lowerCamelCase , lowerCamelCase )
return (hidden_states,)
class _A ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[str] , lowerCamelCase : int ):
'''simple docstring'''
super().__init__()
__lowercase = TaLayerNorm(lowerCamelCase )
__lowercase = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCamelCase )
__lowercase = Attention(query_dim=lowerCamelCase , heads=lowerCamelCase , dim_head=lowerCamelCase , out_bias=lowerCamelCase , scale_qk=lowerCamelCase )
__lowercase = nn.Dropout(lowerCamelCase )
def _snake_case ( self : Optional[Any] , lowerCamelCase : Tuple , lowerCamelCase : List[str]=None , lowerCamelCase : Optional[Any]=None , ):
'''simple docstring'''
__lowercase = self.layer_norm(lowerCamelCase )
if conditioning_emb is not None:
__lowercase = self.FiLMLayer(lowerCamelCase , lowerCamelCase )
# Self-attention block
__lowercase = self.attention(lowerCamelCase )
__lowercase = hidden_states + self.dropout(lowerCamelCase )
return hidden_states
class _A ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : List[str] , lowerCamelCase : Any ):
'''simple docstring'''
super().__init__()
__lowercase = Attention(query_dim=lowerCamelCase , heads=lowerCamelCase , dim_head=lowerCamelCase , out_bias=lowerCamelCase , scale_qk=lowerCamelCase )
__lowercase = TaLayerNorm(lowerCamelCase , eps=lowerCamelCase )
__lowercase = nn.Dropout(lowerCamelCase )
def _snake_case ( self : Dict , lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict=None , lowerCamelCase : str=None , ):
'''simple docstring'''
__lowercase = self.layer_norm(lowerCamelCase )
__lowercase = self.attention(
lowerCamelCase , encoder_hidden_states=lowerCamelCase , attention_mask=attention_mask.squeeze(1 ) , )
__lowercase = hidden_states + self.dropout(lowerCamelCase )
return layer_output
class _A ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , lowerCamelCase : int , lowerCamelCase : List[str] , lowerCamelCase : str , lowerCamelCase : Dict ):
'''simple docstring'''
super().__init__()
__lowercase = TaDenseGatedActDense(d_model=lowerCamelCase , d_ff=lowerCamelCase , dropout_rate=lowerCamelCase )
__lowercase = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCamelCase )
__lowercase = TaLayerNorm(lowerCamelCase , eps=lowerCamelCase )
__lowercase = nn.Dropout(lowerCamelCase )
def _snake_case ( self : str , lowerCamelCase : Tuple , lowerCamelCase : Dict=None ):
'''simple docstring'''
__lowercase = self.layer_norm(lowerCamelCase )
if conditioning_emb is not None:
__lowercase = self.film(lowerCamelCase , lowerCamelCase )
__lowercase = self.DenseReluDense(lowerCamelCase )
__lowercase = hidden_states + self.dropout(lowerCamelCase )
return hidden_states
class _A ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , lowerCamelCase : Tuple , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any] ):
'''simple docstring'''
super().__init__()
__lowercase = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase )
__lowercase = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase )
__lowercase = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase )
__lowercase = nn.Dropout(lowerCamelCase )
__lowercase = NewGELUActivation()
def _snake_case ( self : Optional[int] , lowerCamelCase : Any ):
'''simple docstring'''
__lowercase = self.act(self.wi_a(lowerCamelCase ) )
__lowercase = self.wi_a(lowerCamelCase )
__lowercase = hidden_gelu * hidden_linear
__lowercase = self.dropout(lowerCamelCase )
__lowercase = self.wo(lowerCamelCase )
return hidden_states
class _A ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , lowerCamelCase : str , lowerCamelCase : Optional[int]=1e-6 ):
'''simple docstring'''
super().__init__()
__lowercase = nn.Parameter(torch.ones(lowerCamelCase ) )
__lowercase = eps
def _snake_case ( self : Dict , lowerCamelCase : str ):
'''simple docstring'''
__lowercase = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=lowerCamelCase )
__lowercase = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
__lowercase = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class _A ( nn.Module ):
'''simple docstring'''
def _snake_case ( self : List[str] , lowerCamelCase : torch.Tensor ):
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(lowerCamelCase , 3.0 )) ))
class _A ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int] ):
'''simple docstring'''
super().__init__()
__lowercase = nn.Linear(lowerCamelCase , out_features * 2 , bias=lowerCamelCase )
def _snake_case ( self : List[Any] , lowerCamelCase : List[str] , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowercase = self.scale_bias(lowerCamelCase )
__lowercase , __lowercase = torch.chunk(lowerCamelCase , 2 , -1 )
__lowercase = x * (1 + scale) + shift
return x
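# FiLM conditioning in isolation (a minimal sketch mirroring the `TaFiLMLayer`
# pattern above; the tensor shapes are illustrative): a conditioning embedding
# is projected to per-channel (scale, shift) pairs and applied as
# x * (1 + scale) + shift.
#
#   cond = torch.randn(2, 1, 64)  # conditioning embedding (batch, 1, cond_dim)
#   x = torch.randn(2, 10, 16)    # hidden states (batch, seq_len, channels)
#   scale_bias = nn.Linear(64, 2 * 16, bias=False)
#   scale, shift = torch.chunk(scale_bias(cond), 2, dim=-1)
#   y = x * (1 + scale) + shift   # broadcasts over the sequence dimension
#   assert y.shape == x.shape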
| 655 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
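# The pattern above defers heavy imports: the TYPE_CHECKING branch gives static
# checkers real imports, while at runtime the module is swapped for a lazy
# proxy that imports a submodule only when one of its names is first accessed.
# A minimal standalone sketch of the idea (not the actual `_LazyModule`):
#
#   import importlib, types
#
#   class LazyDemo(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._import_structure = import_structure
#
#       def __getattr__(self, attr):
#           for submodule, names in self._import_structure.items():
#               if attr in names:
#                   module = importlib.import_module("." + submodule, self.__name__)
#                   return getattr(module, attr)
#           raise AttributeError(attr)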
| 655 | 1 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class _A ( _lowercase ):
'''simple docstring'''
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = SMALL_MODEL_IDENTIFIER
__lowercase = "pt"
__lowercase = "tf"
def _snake_case ( self : Union[str, Any] , lowerCamelCase : Any ):
'''simple docstring'''
__lowercase = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(lowerCamelCase )
def _snake_case ( self : int , lowerCamelCase : List[Any] ):
'''simple docstring'''
__lowercase = TFAutoModel.from_pretrained(self.test_model , from_pt=lowerCamelCase )
model_tf.save_pretrained(lowerCamelCase )
def _snake_case ( self : Dict ):
'''simple docstring'''
__lowercase = "mock_framework"
# Framework provided - return whatever the user provides
__lowercase = FeaturesManager.determine_framework(self.test_model , lowerCamelCase )
self.assertEqual(lowerCamelCase , lowerCamelCase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(lowerCamelCase )
__lowercase = FeaturesManager.determine_framework(lowerCamelCase , lowerCamelCase )
self.assertEqual(lowerCamelCase , lowerCamelCase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(lowerCamelCase )
__lowercase = FeaturesManager.determine_framework(lowerCamelCase , lowerCamelCase )
self.assertEqual(lowerCamelCase , lowerCamelCase )
def _snake_case ( self : Dict ):
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(lowerCamelCase )
__lowercase = FeaturesManager.determine_framework(lowerCamelCase )
self.assertEqual(lowerCamelCase , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(lowerCamelCase )
__lowercase = FeaturesManager.determine_framework(lowerCamelCase )
self.assertEqual(lowerCamelCase , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(lowerCamelCase ):
__lowercase = FeaturesManager.determine_framework(lowerCamelCase )
def _snake_case ( self : Any ):
'''simple docstring'''
__lowercase = MagicMock(return_value=lowerCamelCase )
with patch("transformers.onnx.features.is_tf_available" , lowerCamelCase ):
__lowercase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowerCamelCase , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
__lowercase = MagicMock(return_value=lowerCamelCase )
with patch("transformers.onnx.features.is_torch_available" , lowerCamelCase ):
__lowercase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowerCamelCase , self.framework_tf )
# Both in environment -> use PyTorch
__lowercase = MagicMock(return_value=lowerCamelCase )
__lowercase = MagicMock(return_value=lowerCamelCase )
with patch("transformers.onnx.features.is_tf_available" , lowerCamelCase ), patch(
"transformers.onnx.features.is_torch_available" , lowerCamelCase ):
__lowercase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowerCamelCase , self.framework_pt )
# Both not in environment -> raise error
__lowercase = MagicMock(return_value=lowerCamelCase )
__lowercase = MagicMock(return_value=lowerCamelCase )
with patch("transformers.onnx.features.is_tf_available" , lowerCamelCase ), patch(
"transformers.onnx.features.is_torch_available" , lowerCamelCase ):
with self.assertRaises(lowerCamelCase ):
__lowercase = FeaturesManager.determine_framework(self.test_model )
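# The behavior these tests pin down, as a usage sketch (illustrative):
# FeaturesManager.determine_framework picks the ONNX export framework from an
# explicit argument, from the files found in a local checkpoint, or from which
# of PyTorch/TensorFlow is importable in the environment.
#
#   from transformers.onnx import FeaturesManager
#
#   framework = FeaturesManager.determine_framework("bert-base-cased")  # "pt" or "tf"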
| 655 |
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by comma:\n""").strip()
    collection = sorted(int(item) for item in user_input.split(""","""))
    target = int(input("""Enter a single number to be found in the list:\n"""))
    result = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
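# Quick self-check of the helpers above (illustrative; expected values worked
# out by hand):
#
#   example = [0, 5, 7, 10, 15]
#   assert binary_search(example, 7) == 2
#   assert binary_search_std_lib(example, 11) is None
#   assert bisect_left(example, 6) == 2
#   assert bisect_right(example, 5) == 2
#   insort_left(example, 6)  # example becomes [0, 5, 6, 7, 10, 15]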
| 655 | 1 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
snake_case__ : Union[str, Any] = """\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
"""
snake_case__ : str = """\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""
snake_case__ : List[str] = """
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
'scores': List of scores.
Examples:
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> bleurt = datasets.load_metric(\"bleurt\")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results[\"scores\"]])
[1.03, 1.04]
"""
CHECKPOINT_URLS = {
"""bleurt-tiny-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip""",
"""bleurt-tiny-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip""",
"""bleurt-base-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip""",
"""bleurt-base-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip""",
"""bleurt-large-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip""",
"""bleurt-large-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip""",
"""BLEURT-20-D3""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip""",
"""BLEURT-20-D6""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip""",
"""BLEURT-20-D12""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip""",
"""BLEURT-20""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip""",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric):
'''simple docstring'''
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/google-research/bleurt" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/google-research/bleurt"] , reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"] , )
    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT checkpoint
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            self.config_name = "bleurt-base-128"

        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"""
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
return {"scores": scores}
| 655 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
snake_case__ : Optional[int] = {
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Dict = """conditional_detr"""
_snake_case : Union[str, Any] = ["""past_key_values"""]
_snake_case : Optional[int] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Optional[Any] , lowerCamelCase : int=True , lowerCamelCase : Tuple=None , lowerCamelCase : Optional[int]=3 , lowerCamelCase : Optional[int]=300 , lowerCamelCase : List[Any]=6 , lowerCamelCase : str=2_048 , lowerCamelCase : Any=8 , lowerCamelCase : List[str]=6 , lowerCamelCase : Any=2_048 , lowerCamelCase : List[Any]=8 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : List[str]=0.0 , lowerCamelCase : List[Any]=True , lowerCamelCase : str="relu" , lowerCamelCase : int=256 , lowerCamelCase : Dict=0.1 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : Dict=0.0 , lowerCamelCase : Tuple=0.02 , lowerCamelCase : int=1.0 , lowerCamelCase : Tuple=False , lowerCamelCase : List[str]="sine" , lowerCamelCase : List[Any]="resnet50" , lowerCamelCase : Any=True , lowerCamelCase : Any=False , lowerCamelCase : List[Any]=2 , lowerCamelCase : List[Any]=5 , lowerCamelCase : str=2 , lowerCamelCase : Dict=1 , lowerCamelCase : List[str]=1 , lowerCamelCase : Union[str, Any]=2 , lowerCamelCase : Dict=5 , lowerCamelCase : List[Any]=2 , lowerCamelCase : Tuple=0.25 , **lowerCamelCase : List[str] , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__lowercase = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase = backbone_config.get("model_type" )
__lowercase = CONFIG_MAPPING[backbone_model_type]
__lowercase = config_class.from_dict(lowerCamelCase )
__lowercase = use_timm_backbone
__lowercase = backbone_config
__lowercase = num_channels
__lowercase = num_queries
__lowercase = d_model
__lowercase = encoder_ffn_dim
__lowercase = encoder_layers
__lowercase = encoder_attention_heads
__lowercase = decoder_ffn_dim
__lowercase = decoder_layers
__lowercase = decoder_attention_heads
__lowercase = dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = activation_function
__lowercase = init_std
__lowercase = init_xavier_std
__lowercase = encoder_layerdrop
__lowercase = decoder_layerdrop
__lowercase = encoder_layers
__lowercase = auxiliary_loss
__lowercase = position_embedding_type
__lowercase = backbone
__lowercase = use_pretrained_backbone
__lowercase = dilation
# Hungarian matcher
__lowercase = class_cost
__lowercase = bbox_cost
__lowercase = giou_cost
# Loss coefficients
__lowercase = mask_loss_coefficient
__lowercase = dice_loss_coefficient
__lowercase = cls_loss_coefficient
__lowercase = bbox_loss_coefficient
__lowercase = giou_loss_coefficient
__lowercase = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase , **lowerCamelCase )
@property
def _snake_case ( self : Tuple ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def _snake_case ( self : str ):
'''simple docstring'''
return self.d_model
def _snake_case ( self : int ):
'''simple docstring'''
__lowercase = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
__lowercase = self.backbone_config.to_dict()
__lowercase = self.__class__.model_type
return output
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Any = version.parse("""1.11""" )
@property
def _snake_case ( self : Tuple ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _snake_case ( self : Any ):
'''simple docstring'''
return 1e-5
@property
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
return 12
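# Usage sketch (illustrative): this config is registered as model type
# "conditional_detr"; upstream it is exposed as `ConditionalDetrConfig`.
#
#   from transformers import ConditionalDetrConfig, ConditionalDetrModel
#
#   config = ConditionalDetrConfig(num_queries=100, d_model=256)
#   model = ConditionalDetrModel(config)  # randomly initialized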
| 655 | 1 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
snake_case__ : Tuple = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 655 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case__ : Any = logging.get_logger(__name__)
class _A ( _lowercase , _lowercase ):
'''simple docstring'''
_snake_case : Dict = """maskformer-swin"""
_snake_case : List[str] = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : List[str] , lowerCamelCase : Any=224 , lowerCamelCase : Optional[Any]=4 , lowerCamelCase : Dict=3 , lowerCamelCase : Tuple=96 , lowerCamelCase : str=[2, 2, 6, 2] , lowerCamelCase : Dict=[3, 6, 12, 24] , lowerCamelCase : Optional[Any]=7 , lowerCamelCase : Any=4.0 , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : List[str]=0.0 , lowerCamelCase : Optional[int]=0.0 , lowerCamelCase : List[str]=0.1 , lowerCamelCase : int="gelu" , lowerCamelCase : Optional[int]=False , lowerCamelCase : List[Any]=0.02 , lowerCamelCase : Tuple=1e-5 , lowerCamelCase : Dict=None , lowerCamelCase : Dict=None , **lowerCamelCase : int , ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = embed_dim
__lowercase = depths
__lowercase = len(lowerCamelCase )
__lowercase = num_heads
__lowercase = window_size
__lowercase = mlp_ratio
__lowercase = qkv_bias
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = drop_path_rate
__lowercase = hidden_act
__lowercase = use_absolute_embeddings
__lowercase = layer_norm_eps
__lowercase = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__lowercase = int(embed_dim * 2 ** (len(lowerCamelCase ) - 1) )
__lowercase = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=lowerCamelCase , out_indices=lowerCamelCase , stage_names=self.stage_names )
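# Usage sketch (illustrative): the backbone machinery wired up above lets a
# caller pick which stages to expose as feature maps; upstream this class is
# exposed as `MaskFormerSwinConfig`.
#
#   config = MaskFormerSwinConfig(out_features=["stage2", "stage3", "stage4"])
#   print(config.out_features)  # kept aligned with config.out_indices automatically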
| 655 | 1 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list2[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list2)


def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
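# Worked call sequence (illustrative), for a function of 2 variables with
# minterms 0, 1 and 3:
#
#   binary = decimal_to_binary(2, [0, 1, 3])          # ['00', '01', '11']
#   prime_implicants = check(binary)
#   chart = prime_implicant_chart(prime_implicants, binary)
#   essential = selection(chart, prime_implicants)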
| 655 |
def gray_code(bit_count: int) -> list:
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n=1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1<< n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
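# Example (illustrative):
#
#   gray_code(2)  # -> [0, 1, 3, 2]; consecutive values differ by exactly one bit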
| 655 | 1 |
import numpy as np
import datasets
snake_case__ : Union[str, Any] = """
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""
snake_case__ : List[Any] = """\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
"""
snake_case__ : Any = """
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{'mahalanobis': array([0.5])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mahalanobis(datasets.Metric):
'''simple docstring'''
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
| 655 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ):
model.train()
__lowercase = model(_SCREAMING_SNAKE_CASE )
__lowercase = F.mse_loss(_SCREAMING_SNAKE_CASE , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(_SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
set_seed(4_2 )
__lowercase = RegressionModel()
__lowercase = deepcopy(_SCREAMING_SNAKE_CASE )
__lowercase = RegressionDataset(length=8_0 )
__lowercase = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=1_6 )
model.to(accelerator.device )
if sched:
__lowercase = AdamW(params=model.parameters() , lr=1E-3 )
__lowercase = AdamW(params=ddp_model.parameters() , lr=1E-3 )
__lowercase = LambdaLR(_SCREAMING_SNAKE_CASE , lr_lambda=lambda _SCREAMING_SNAKE_CASE : epoch**0.6_5 )
__lowercase = LambdaLR(_SCREAMING_SNAKE_CASE , lr_lambda=lambda _SCREAMING_SNAKE_CASE : epoch**0.6_5 )
# Make a copy of `model`
if sched:
__lowercase , __lowercase , __lowercase , __lowercase = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
__lowercase , __lowercase = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# Test when on a single CPU or GPU that the context manager does nothing
__lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE )
# Use a single batch
__lowercase , __lowercase = next(iter(_SCREAMING_SNAKE_CASE ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) )
__lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
# Sync grads
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )]
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# Test on distributed setup that context manager behaves properly
__lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE )
# Use a single batch
__lowercase , __lowercase = next(iter(_SCREAMING_SNAKE_CASE ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) )
__lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
# Sync grads
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )]
def snake_case_ ( _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ):
__lowercase = Accelerator(
split_batches=_SCREAMING_SNAKE_CASE , dispatch_batches=_SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE )
for iteration, batch in enumerate(_SCREAMING_SNAKE_CASE ):
__lowercase , __lowercase = batch.values()
# Gather the distributed inputs and targs for the base model
__lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) )
__lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(_SCREAMING_SNAKE_CASE ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )]
GradientState._reset_state()
def snake_case_ ( _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ):
__lowercase = Accelerator(
split_batches=_SCREAMING_SNAKE_CASE , dispatch_batches=_SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for iteration, batch in enumerate(_SCREAMING_SNAKE_CASE ):
__lowercase , __lowercase = batch.values()
# Gather the distributed inputs and targs for the base model
__lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) )
__lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_SCREAMING_SNAKE_CASE )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"""
__lowercase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_SCREAMING_SNAKE_CASE ))
if accelerator.num_processes > 1:
check_model_parameters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
GradientState._reset_state()
def snake_case_ ( ):
__lowercase = Accelerator()
__lowercase = RegressionDataset(length=8_0 )
__lowercase = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=1_6 )
__lowercase = RegressionDataset(length=9_6 )
__lowercase = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=1_6 )
__lowercase , __lowercase = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(_SCREAMING_SNAKE_CASE ):
assert id(accelerator.gradient_state.active_dataloader ) == id(_SCREAMING_SNAKE_CASE )
if iteration < len(_SCREAMING_SNAKE_CASE ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(_SCREAMING_SNAKE_CASE ):
assert id(accelerator.gradient_state.active_dataloader ) == id(_SCREAMING_SNAKE_CASE )
if batch_num < len(_SCREAMING_SNAKE_CASE ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def snake_case_ ( ):
__lowercase = Accelerator()
__lowercase = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(_SCREAMING_SNAKE_CASE )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(_SCREAMING_SNAKE_CASE )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation_with_opt_and_scheduler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
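# The pattern these tests exercise, as a plain training-loop sketch
# (illustrative; `compute_loss` is a hypothetical helper):
#
#   accelerator = Accelerator(gradient_accumulation_steps=2)
#   model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#   for batch in dataloader:
#       with accelerator.accumulate(model):
#           loss = compute_loss(model, batch)
#           accelerator.backward(loss)
#           optimizer.step()
#           optimizer.zero_grad()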
| 655 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = "unispeech"
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , num_codevectors_per_group=320 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=100 , codevector_dim=256 , proj_codevector_dim=256 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , num_ctc_classes=80 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , replace_prob=0.5 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
                f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob
    @property
    def inputs_to_logits_ratio( self ):
        '''simple docstring'''
        return functools.reduce(operator.mul , self.conv_stride , 1 )
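# Note on the property above: inputs_to_logits_ratio multiplies the conv strides,
# so with the default conv_stride of (5, 2, 2, 2, 2, 2, 2) one output frame
# corresponds to 5 * 2**6 = 320 input samples.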
from ....utils import logging
logger = logging.get_logger(__name__)
class MMBTConfig :
    '''simple docstring'''
    def __init__( self , config , num_labels=None , modal_hidden_size=2_048 ):
        '''simple docstring'''
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 5_12,
    "google/fnet-large": 5_12,
}
SPIECE_UNDERLINE = "▁"
class FNetTokenizerFast ( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=True , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        '''simple docstring'''
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
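# Usage sketch for the tokenizer above (token ids are illustrative placeholders):
# tok = FNetTokenizerFast.from_pretrained("google/fnet-base")
# tok.build_inputs_with_special_tokens([5, 6])           # -> [cls_id, 5, 6, sep_id]
# tok.create_token_type_ids_from_sequences([5, 6], [7])  # -> [0, 0, 0, 0, 1, 1]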
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components( self ):
        '''simple docstring'''
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32 , size=32 )
        torch.manual_seed(0 )
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size , projection_dim=embedder_projection_dim , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
        # regular denoising components
        torch.manual_seed(0 )
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size )
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
        torch.manual_seed(0 )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=embedder_hidden_size , layers_per_block=1 , upcast_attention=True , use_linear_projection=True , )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=False , steps_offset=1 , )
        torch.manual_seed(0 )
        vae = AutoencoderKL()
        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components
    def get_dummy_inputs( self , device , seed=0 , pil_image=True ):
        '''simple docstring'''
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        input_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0 , 1 )
            input_image = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
    def test_image_embeds_none( self ):
        '''simple docstring'''
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs.update({"image_embeds": None} )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_attention_slicing_forward_pass( self ):
        '''simple docstring'''
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )
    def test_inference_batch_single_identical( self ):
        '''simple docstring'''
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        '''simple docstring'''
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False )
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img( self ):
        '''simple docstring'''
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        output = pipe(init_image , "anime turtle" , generator=generator , output_type="np" )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
    def test_stable_unclip_h_img2img( self ):
        '''simple docstring'''
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        output = pipe(init_image , "anime turtle" , generator=generator , output_type="np" )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading( self ):
        '''simple docstring'''
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            init_image , "anime turtle" , num_inference_steps=2 , output_type="np" , )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput ( BaseOutput ):
    '''simple docstring'''
    latent_dist : "DiagonalGaussianDistribution"
class AutoencoderKL ( ModelMixin , ConfigMixin ):
    '''simple docstring'''
    _supports_gradient_checkpointing = True
@register_to_config
    def __init__( self , in_channels: int = 3 , out_channels: int = 3 , down_block_types: Tuple[str] = ("DownEncoderBlock2D",) , up_block_types: Tuple[str] = ("UpDecoderBlock2D",) , block_out_channels: Tuple[int] = (64,) , layers_per_block: int = 1 , act_fn: str = "silu" , latent_channels: int = 4 , norm_num_groups: int = 32 , sample_size: int = 32 , scaling_factor: float = 0.1_8215 , ):
        '''simple docstring'''
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=True , )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , norm_num_groups=norm_num_groups , act_fn=act_fn , )
        self.quant_conv = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
        self.post_quant_conv = nn.Conv2d(latent_channels , latent_channels , 1 )
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size , (list, tuple) )
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing( self , module , value=False ):
        '''simple docstring'''
        if isinstance(module , (Encoder, Decoder) ):
            module.gradient_checkpointing = value
    def enable_tiling( self , use_tiling: bool = True ):
        '''simple docstring'''
        self.use_tiling = use_tiling
    def disable_tiling( self ):
        '''simple docstring'''
        self.enable_tiling(False )
    def enable_slicing( self ):
        '''simple docstring'''
        self.use_slicing = True
    def disable_slicing( self ):
        '''simple docstring'''
        self.use_slicing = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors( self ):
        '''simple docstring'''
        processors = {}
        def fn_recursive_add_processors(name: str , module: torch.nn.Module , processors: Dict[str, AttentionProcessor] ):
            if hasattr(module , "set_processor" ):
                processors[f"""{name}.processor"""] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"""{name}.{sub_name}""" , child , processors )
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(name , module , processors )
        return processors
    def set_attn_processor( self , processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
        '''simple docstring'''
        count = len(self.attn_processors.keys() )
        if isinstance(processor , dict ) and len(processor ) != count:
            raise ValueError(
                f"""A dict of processors was passed, but the number of processors {len(processor )} does not match the"""
                f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
        def fn_recursive_attn_processor(name: str , module: torch.nn.Module , processor ):
            if hasattr(module , "set_processor" ):
                if not isinstance(processor , dict ):
                    module.set_processor(processor )
                else:
                    module.set_processor(processor.pop(f"""{name}.processor""" ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"""{name}.{sub_name}""" , child , processor )
        for name, module in self.named_children():
            fn_recursive_attn_processor(name , module , processor )
    def set_default_attn_processor( self ):
        '''simple docstring'''
        self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
    def encode( self , x: torch.FloatTensor , return_dict: bool = True ):
        '''simple docstring'''
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x , return_dict=return_dict )
        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice ) for x_slice in x.split(1 )]
            h = torch.cat(encoded_slices )
        else:
            h = self.encoder(x )
        moments = self.quant_conv(h )
        posterior = DiagonalGaussianDistribution(moments )
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior )
    def _decode( self , z: torch.FloatTensor , return_dict: bool = True ):
        '''simple docstring'''
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z , return_dict=return_dict )
        z = self.post_quant_conv(z )
        dec = self.decoder(z )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    @apply_forward_hook
    def decode( self , z: torch.FloatTensor , return_dict: bool = True ):
        '''simple docstring'''
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice ).sample for z_slice in z.split(1 )]
            decoded = torch.cat(decoded_slices )
        else:
            decoded = self._decode(z ).sample
        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded )
    def blend_v( self , a , b , blend_extent ):
        '''simple docstring'''
        blend_extent = min(a.shape[2] , b.shape[2] , blend_extent )
        for y in range(blend_extent ):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b
    def blend_h( self , a , b , blend_extent ):
        '''simple docstring'''
        blend_extent = min(a.shape[3] , b.shape[3] , blend_extent )
        for x in range(blend_extent ):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def tiled_encode( self , x: torch.FloatTensor , return_dict: bool = True ):
        '''simple docstring'''
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor )
        row_limit = self.tile_latent_min_size - blend_extent
        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0 , x.shape[2] , overlap_size ):
            row = []
            for j in range(0 , x.shape[3] , overlap_size ):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile )
                tile = self.quant_conv(tile )
                row.append(tile )
            rows.append(row )
        result_rows = []
        for i, row in enumerate(rows ):
            result_row = []
            for j, tile in enumerate(row ):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j] , tile , blend_extent )
                if j > 0:
                    tile = self.blend_h(row[j - 1] , tile , blend_extent )
                result_row.append(tile[:, :, :row_limit, :row_limit] )
            result_rows.append(torch.cat(result_row , dim=3 ) )
        moments = torch.cat(result_rows , dim=2 )
        posterior = DiagonalGaussianDistribution(moments )
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior )
    def tiled_decode( self , z: torch.FloatTensor , return_dict: bool = True ):
        '''simple docstring'''
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor )
        row_limit = self.tile_sample_min_size - blend_extent
        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0 , z.shape[2] , overlap_size ):
            row = []
            for j in range(0 , z.shape[3] , overlap_size ):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile )
                decoded = self.decoder(tile )
                row.append(decoded )
            rows.append(row )
        result_rows = []
        for i, row in enumerate(rows ):
            result_row = []
            for j, tile in enumerate(row ):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j] , tile , blend_extent )
                if j > 0:
                    tile = self.blend_h(row[j - 1] , tile , blend_extent )
                result_row.append(tile[:, :, :row_limit, :row_limit] )
            result_rows.append(torch.cat(result_row , dim=3 ) )
        dec = torch.cat(result_rows , dim=2 )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward( self , sample: torch.FloatTensor , sample_posterior: bool = False , return_dict: bool = True , generator: Optional[torch.Generator] = None , ):
        '''simple docstring'''
        x = sample
        posterior = self.encode(x ).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator )
        else:
            z = posterior.mode()
        dec = self.decode(z ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
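# Minimal round-trip sketch for the class above (the default config keeps the
# spatial size, since block_out_channels has a single entry):
# vae = AutoencoderKL()
# vae.enable_tiling()  # large inputs are routed through tiled_encode/tiled_decode
# posterior = vae.encode(torch.randn(1, 3, 64, 64)).latent_dist
# reconstruction = vae.decode(posterior.sample()).sample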
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel ( ModelMixin , ConfigMixin ):
    '''simple docstring'''
    @register_to_config
    def __init__( self , *,
    clip_extra_context_tokens: int = 4 , clip_embeddings_dim: int = 768 , time_embed_dim: int , cross_attention_dim , ):
        '''simple docstring'''
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim ) )
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim , time_embed_dim )
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim , time_embed_dim )
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim , self.clip_extra_context_tokens * cross_attention_dim )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim , cross_attention_dim )
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim )
    def forward( self , *, image_embeddings , prompt_embeds , text_encoder_hidden_states , do_classifier_free_guidance ):
        '''simple docstring'''
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size , -1 )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds )
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings )
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings )
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size , -1 , self.clip_extra_context_tokens )
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0 , 2 , 1 )
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states )
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states )
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
        return text_encoder_hidden_states, additive_clip_time_embeddings
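# Shape sketch for the forward pass above, assuming the defaults
# (clip_extra_context_tokens=4, clip_embeddings_dim=768) and cross_attention_dim D:
# image_embeddings (B, 768) are projected to (B, 4 * D), reshaped to (B, D, 4) and
# permuted to (B, 4, D), then prepended to the projected text encoder states along
# the sequence dimension; additive_clip_time_embeddings has shape (B, time_embed_dim).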
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor ( ChineseCLIPImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T" )
U = TypeVar("U" )
class DoubleLinkedListNode ( Generic[T, U] ):
'''simple docstring'''
    def __init__( self , key: T | None , val: U | None ):
        '''simple docstring'''
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None
def __repr__( self : Any ):
'''simple docstring'''
return (
f"""Node: key: {self.key}, val: {self.val}, """
f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class DoubleLinkedList ( Generic[T, U] ):
'''simple docstring'''
def __init__( self : Dict ):
'''simple docstring'''
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None , None )
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None , None )
        self.head.next, self.rear.prev = self.rear, self.head
def __repr__( self : Optional[Any] ):
'''simple docstring'''
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node ) )
            node = node.next
        rep.append(str(self.rear ) )
        return ",\n ".join(rep )
    def add( self , node: DoubleLinkedListNode[T, U] ):
        '''simple docstring'''
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear
    def remove( self , node: DoubleLinkedListNode[T, U] ):
        '''simple docstring'''
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache ( Generic[T, U] ):
'''simple docstring'''
    decorator_function_to_instance_map : dict[Callable[[T], U], LRUCache[T, U]] = {}
    def __init__( self , capacity: int ):
        '''simple docstring'''
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self : Optional[Any] ):
'''simple docstring'''
return (
f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
f"""capacity={self.capacity}, current size={self.num_keys})"""
)
    def __contains__( self , key: T ):
        '''simple docstring'''
        return key in self.cache
    def get( self , key: T ):
        '''simple docstring'''
        if key in self.cache:
            self.hits += 1
            value_node = self.cache[key]
            node = self.list.remove(self.cache[key] )
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node )
            return node.val
        self.miss += 1
        return None
    def put( self , key: T , value: U ):
        '''simple docstring'''
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node ) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key , value )
            self.list.add(self.cache[key] )
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key] )
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node )
    @classmethod
    def decorator( cls , size: int = 128 ):
        '''simple docstring'''
        def cache_decorator_inner(func: Callable[[T], U] ) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T ) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size )
                result = cls.decorator_function_to_instance_map[func].get(args[0] )
                if result is None:
                    result = func(*args )
                    cls.decorator_function_to_instance_map[func].put(args[0] , result )
                return result
            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]
            setattr(cache_decorator_wrapper , "cache_info" , cache_info )  # noqa: B010
            return cache_decorator_wrapper
        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
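# Usage sketch: the decorator classmethod above memoizes a unary function,
# mirroring functools.lru_cache.
# @LRUCache.decorator(100)
# def fib(num: int) -> int:
#     return num if num < 2 else fib(num - 1) + fib(num - 2)
# fib(25)
# print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)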
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def bamb ( x ):
    return int(x / 2**2_0 )
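# Quick sanity check for the helper above: bamb converts bytes to whole
# megabytes, e.g. bamb(5 * 2**20) == 5.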
class TorchTracemalloc :
'''simple docstring'''
def __enter__( self : Union[str, Any] ):
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
return self
    def __exit__( self , *exc ):
        '''simple docstring'''
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin )
        self.peaked = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
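# Usage sketch: code run inside the context manager has its CUDA memory measured,
# e.g.
# with TorchTracemalloc() as tracemalloc:
#     run_training_step()  # hypothetical workload
# print(tracemalloc.used, tracemalloc.peaked)  # MB consumed / peak MB above baseline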
def get_dataloaders ( accelerator , batch_size = 1_6 , model_name = "bert-base-cased" , n_train = 3_2_0 , n_val = 1_6_0 , ):
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset(
        "glue" , "mrpc" , split={"train": f"""train[:{n_train}]""", "validation": f"""validation[:{n_val}]"""} )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="max_length" , max_length=1_2_8 , return_tensors="pt" )
        return tokenizer.pad(examples , padding="longest" , return_tensors="pt" )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def training_function ( config , args ):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    model_name = args.model_name_or_path
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size , model_name , args.n_train , args.n_val )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch , num_epochs ):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader ):
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin ) ) )
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used ) )
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked ) )
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
        train_total_peak_memory[f"""epoch-{epoch}"""] = tracemalloc.peaked + bamb(tracemalloc.begin )
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , "peak_memory_utilization.json" ) , "w" ) as f:
            json.dump(train_total_peak_memory , f )
def main ( ):
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
    parser.add_argument(
        "--model_name_or_path" , type=str , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=False , )
    parser.add_argument(
        "--output_dir" , type=str , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
    parser.add_argument(
        "--peak_memory_upper_bound" , type=float , default=None , help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value." , )
    parser.add_argument(
        "--n_train" , type=int , default=3_2_0 , help="Number of training examples to use." , )
    parser.add_argument(
        "--n_val" , type=int , default=1_6_0 , help="Number of validation examples to use." , )
    parser.add_argument(
        "--num_epochs" , type=int , default=1 , help="Number of train epochs." , )
    args = parser.parse_args()
    config = {"lr": 2E-5, "num_epochs": args.num_epochs, "seed": 4_2, "batch_size": 1_6}
    training_function(config , args )
if __name__ == "__main__":
main()
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles ( path , articles ):
    content = "\n".join(articles )
    Path(path ).open("w" ).writelines(content )
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest ( TestCasePlus ):
'''simple docstring'''
    def run_eval_tester( self , model ):
        '''simple docstring'''
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name , articles )
        score_path = str(Path(self.get_auto_remove_tmp_dir() ) / "scores.json" )
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()
        with patch.object(sys , "argv" , testargs ):
            run_generate()
            assert Path(output_file_name ).exists()
            # os.remove(Path(output_file_name))
    def test_run_eval( self ):
        '''simple docstring'''
        self.run_eval_tester(T5_TINY )
    @parameterized.expand([BART_TINY, MBART_TINY] )
    @slow
    def test_run_eval_slow( self , model ):
        '''simple docstring'''
        self.run_eval_tester(model )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
    def test_run_eval_search( self , model ):
        '''simple docstring'''
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir() )
        score_path = str(tmp_dir / "scores.json" )
        reference_path = str(tmp_dir / "val.target" )
        _dump_articles(input_file_name , text["en"] )
        _dump_articles(reference_path , text["de"] )
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name )}
            {str(output_file_name )}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"] )
        with patch.object(sys , "argv" , testargs ):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu" )
            else:
                expected_strings.extend(ROUGE_KEYS )
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name ).exists()
            os.remove(Path(output_file_name ) )
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments :
    '''simple docstring'''
    output_dir : str = field(
        metadata={"help": "The output directory where the model will be written."} , )
    encoder_model_name_or_path : str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization."
                "Don't set if you want to train an encoder model from scratch."
            )
        } , )
    decoder_model_name_or_path : str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization."
                "Don't set if you want to train a decoder model from scratch."
            )
        } , )
    encoder_config_name : Optional[str] = field(
        default=None , metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} )
    decoder_config_name : Optional[str] = field(
        default=None , metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} )
def main ( ):
    parser = HfArgumentParser((ModelArguments,) )
    (model_args ,) = parser.parse_args_into_dataclasses()
    # Load pretrained model and tokenizer
    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name )
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name )
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True
    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=encoder_config , decoder_config=decoder_config , )
    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id
    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id
    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
    model.save_pretrained(model_args.output_dir )
    image_processor.save_pretrained(model_args.output_dir )
    tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode :
    '''simple docstring'''
    data : int
    left : TreeNode | None = None
    right : TreeNode | None = None
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins ( root: TreeNode | None ) -> int:
    if root is None:
        return 0
    # Validation
    def count_nodes(node: TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node: TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
        raise ValueError("The nodes number should be same as the number of coins" )
    # Main calculation
    def get_distrib(node: TreeNode | None ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves , result_excess )
    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
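# Worked example for distribute_coins: the root holds all three coins, so one
# coin must move to each child, for a minimum of two moves.
# tree = TreeNode(3, TreeNode(0), TreeNode(0))
# assert distribute_coins(tree) == 2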
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
logger = logging.getLogger(__name__)
def save_model ( model , dirpath ):
    # save results
    if os.path.exists(dirpath ):
        if os.path.exists(os.path.join(dirpath , "config.json" ) ) and os.path.isfile(
            os.path.join(dirpath , "config.json" ) ):
            os.remove(os.path.join(dirpath , "config.json" ) )
        if os.path.exists(os.path.join(dirpath , "pytorch_model.bin" ) ) and os.path.isfile(
            os.path.join(dirpath , "pytorch_model.bin" ) ):
            os.remove(os.path.join(dirpath , "pytorch_model.bin" ) )
    else:
        os.makedirs(dirpath )
    model.save_pretrained(dirpath )
def entropy ( p , unlogit=False ):
    exponent = 2
    if unlogit:
        p = torch.pow(p , exponent )
    plogp = p * torch.log(p )
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1 )
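# Sanity check for entropy above: a uniform distribution over n outcomes has
# entropy log(n), e.g. entropy(torch.full((1, 4), 0.25)) ≈ log(4) ≈ 1.386.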
def print_ad_tensor ( tensor ):
logger.info("lv, h >\t" + "\t".join(F"""{x + 1}""" for x in range(len(_SCREAMING_SNAKE_CASE ) ) ) )
for row in range(len(_SCREAMING_SNAKE_CASE ) ):
if tensor.dtype != torch.long:
logger.info(F"""layer {row + 1}:\t""" + "\t".join(F"""{x:.5f}""" for x in tensor[row].cpu().data ) )
else:
logger.info(F"""layer {row + 1}:\t""" + "\t".join(F"""{x:d}""" for x in tensor[row].cpu().data ) )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False ):
__lowercase , __lowercase = model.config.num_hidden_layers, model.config.num_attention_heads
__lowercase = torch.zeros(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(args.device )
__lowercase = torch.zeros(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(args.device )
if head_mask is None:
__lowercase = torch.ones(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(args.device )
head_mask.requires_grad_(requires_grad=_SCREAMING_SNAKE_CASE )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
__lowercase = None
__lowercase = 0.0
__lowercase = 0.0
for step, inputs in enumerate(tqdm(_SCREAMING_SNAKE_CASE , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ):
__lowercase = tuple(t.to(args.device ) for t in inputs )
((__lowercase) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
__lowercase = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
__lowercase , __lowercase , __lowercase = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(_SCREAMING_SNAKE_CASE ):
__lowercase = entropy(attn.detach() , _SCREAMING_SNAKE_CASE )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(_SCREAMING_SNAKE_CASE ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
__lowercase = 2
__lowercase = torch.pow(torch.pow(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-2_0
if not args.dont_normalize_global_importance:
__lowercase = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("Attention entropies" )
print_ad_tensor(_SCREAMING_SNAKE_CASE )
if compute_importance:
logger.info("Head importance scores" )
print_ad_tensor(_SCREAMING_SNAKE_CASE )
logger.info("Head ranked by importance scores" )
__lowercase = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
__lowercase = torch.arange(
head_importance.numel() , device=args.device )
__lowercase = head_ranks.view_as(_SCREAMING_SNAKE_CASE )
print_ad_tensor(_SCREAMING_SNAKE_CASE )
return attn_entropy, head_importance, total_loss
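# Hedged illustration, not part of the original script: the layerwise
# normalization above divides each layer's row of head-importance scores by
# its L2 norm; torch.pow(torch.pow(x, 2).sum(-1), 1 / 2) equals x.norm(dim=-1).
def _layerwise_norm_demo():
    import torch

    scores = torch.tensor([[3.0, 4.0]])  # one layer, two heads
    norm = torch.pow(torch.pow(scores, 2).sum(-1), 1 / 2)
    assert torch.allclose(norm, torch.tensor([5.0]))
    return scores / (norm.unsqueeze(-1) + 1e-20)  # tensor([[0.6000, 0.8000]])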
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase , __lowercase , __lowercase = compute_heads_importance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE )
    __lowercase = 1 / loss # use the inverse LM loss instead of a downstream score
logger.info("Pruning: original score: %f, threshold: %f" , _SCREAMING_SNAKE_CASE , original_score * args.masking_threshold )
__lowercase = torch.ones_like(_SCREAMING_SNAKE_CASE )
__lowercase = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
__lowercase = original_score
while current_score >= original_score * args.masking_threshold:
__lowercase = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
__lowercase = float("Inf" )
__lowercase = head_importance.view(-1 ).sort()[1]
if len(_SCREAMING_SNAKE_CASE ) <= num_to_mask:
print("BREAK BY num_to_mask" )
break
# mask heads
__lowercase = current_heads_to_mask[:num_to_mask]
logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) )
__lowercase = new_head_mask.view(-1 )
        new_head_mask[current_heads_to_mask] = 0.0
__lowercase = new_head_mask.view_as(_SCREAMING_SNAKE_CASE )
__lowercase = new_head_mask.clone().detach()
print_ad_tensor(_SCREAMING_SNAKE_CASE )
# Compute metric and head importance again
__lowercase , __lowercase , __lowercase = compute_heads_importance(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE )
__lowercase = 1 / loss
logger.info(
"Masking: current score: %f, remaining heads %d (%.1f percents)" , _SCREAMING_SNAKE_CASE , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_0_0 , )
logger.info("Final head mask" )
print_ad_tensor(_SCREAMING_SNAKE_CASE )
np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() )
return head_mask
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = datetime.now()
__lowercase , __lowercase , __lowercase = compute_heads_importance(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE , compute_importance=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE )
__lowercase = 1 / loss
__lowercase = datetime.now() - before_time
__lowercase = sum(p.numel() for p in model.parameters() )
__lowercase = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(_SCREAMING_SNAKE_CASE ) )
}
for k, v in heads_to_prune.items():
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = [
v,
]
assert sum(len(_SCREAMING_SNAKE_CASE ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(_SCREAMING_SNAKE_CASE )
__lowercase = sum(p.numel() for p in model.parameters() )
__lowercase = datetime.now()
__lowercase , __lowercase , __lowercase = compute_heads_importance(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE , compute_importance=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE , actually_pruned=_SCREAMING_SNAKE_CASE , )
__lowercase = 1 / loss
__lowercase = datetime.now() - before_time
logger.info(
"Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , pruned_num_params / original_num_params * 1_0_0 , )
logger.info("Pruning: score with masking: %f score with pruning: %f" , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 1_0_0 )
save_model(_SCREAMING_SNAKE_CASE , args.output_dir )
def snake_case_ ( ):
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
parser.add_argument(
"--model_name_or_path" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--output_dir" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="The output directory where the model predictions and checkpoints will be written." , )
# Other parameters
parser.add_argument(
"--config_name" , default="" , type=_SCREAMING_SNAKE_CASE , help="Pretrained config name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--tokenizer_name" , default="" , type=_SCREAMING_SNAKE_CASE , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--cache_dir" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help="Where do you want to store the pre-trained models downloaded from s3" , )
parser.add_argument(
"--data_subset" , type=_SCREAMING_SNAKE_CASE , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
parser.add_argument(
"--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
parser.add_argument(
"--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
parser.add_argument(
"--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
parser.add_argument(
"--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
parser.add_argument(
"--masking_threshold" , default=0.9 , type=_SCREAMING_SNAKE_CASE , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
parser.add_argument(
"--masking_amount" , default=0.1 , type=_SCREAMING_SNAKE_CASE , help="Amount to heads to masking at each masking step." )
parser.add_argument("--metric_name" , default="acc" , type=_SCREAMING_SNAKE_CASE , help="Metric to use for head masking." )
parser.add_argument(
"--max_seq_length" , default=1_2_8 , type=_SCREAMING_SNAKE_CASE , help=(
"The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded."
) , )
parser.add_argument("--batch_size" , default=1 , type=_SCREAMING_SNAKE_CASE , help="Batch size." )
parser.add_argument("--seed" , type=_SCREAMING_SNAKE_CASE , default=4_2 )
parser.add_argument("--local_rank" , type=_SCREAMING_SNAKE_CASE , default=-1 , help="local_rank for distributed training on gpus" )
parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
parser.add_argument("--server_ip" , type=_SCREAMING_SNAKE_CASE , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=_SCREAMING_SNAKE_CASE , default="" , help="Can be used for distant debugging." )
__lowercase = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_SCREAMING_SNAKE_CASE )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
__lowercase = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
__lowercase = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
__lowercase = torch.device("cuda" , args.local_rank )
__lowercase = 1
torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
__lowercase = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
__lowercase = nn.parallel.DistributedDataParallel(
_SCREAMING_SNAKE_CASE , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_SCREAMING_SNAKE_CASE )
elif args.n_gpu > 1:
__lowercase = nn.DataParallel(_SCREAMING_SNAKE_CASE )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=_SCREAMING_SNAKE_CASE )
torch.save(_SCREAMING_SNAKE_CASE , os.path.join(args.output_dir , "run_args.bin" ) )
logger.info("Training/evaluation parameters %s" , _SCREAMING_SNAKE_CASE )
# Prepare dataset
__lowercase = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
__lowercase = (torch.from_numpy(_SCREAMING_SNAKE_CASE ),)
__lowercase = TensorDataset(*_SCREAMING_SNAKE_CASE )
__lowercase = RandomSampler(_SCREAMING_SNAKE_CASE )
__lowercase = DataLoader(_SCREAMING_SNAKE_CASE , sampler=_SCREAMING_SNAKE_CASE , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
__lowercase = mask_heads(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
prune_heads(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 655 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = SwinvaConfig()
__lowercase = swinva_name.split("_" )
__lowercase = name_split[1]
if "to" in name_split[3]:
__lowercase = int(name_split[3][-3:] )
else:
__lowercase = int(name_split[3] )
if "to" in name_split[2]:
__lowercase = int(name_split[2][-2:] )
else:
__lowercase = int(name_split[2][6:] )
if model_size == "tiny":
__lowercase = 9_6
__lowercase = (2, 2, 6, 2)
__lowercase = (3, 6, 1_2, 2_4)
elif model_size == "small":
__lowercase = 9_6
__lowercase = (2, 2, 1_8, 2)
__lowercase = (3, 6, 1_2, 2_4)
elif model_size == "base":
__lowercase = 1_2_8
__lowercase = (2, 2, 1_8, 2)
__lowercase = (4, 8, 1_6, 3_2)
else:
__lowercase = 1_9_2
__lowercase = (2, 2, 1_8, 2)
__lowercase = (6, 1_2, 2_4, 4_8)
if "to" in swinva_name:
__lowercase = (1_2, 1_2, 1_2, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
__lowercase = 2_1_8_4_1
__lowercase = "huggingface/label-files"
__lowercase = "imagenet-22k-id2label.json"
__lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
__lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
else:
__lowercase = 1_0_0_0
__lowercase = "huggingface/label-files"
__lowercase = "imagenet-1k-id2label.json"
__lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
__lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
__lowercase = img_size
__lowercase = num_classes
__lowercase = embed_dim
__lowercase = depths
__lowercase = num_heads
__lowercase = window_size
return config
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
if "patch_embed.proj" in name:
__lowercase = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
__lowercase = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
__lowercase = "encoder." + name
if "attn.proj" in name:
__lowercase = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
__lowercase = name.replace("attn" , "attention.self" )
if "norm1" in name:
__lowercase = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
__lowercase = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
__lowercase = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__lowercase = name.replace("mlp.fc2" , "output.dense" )
if "q_bias" in name:
__lowercase = name.replace("q_bias" , "query.bias" )
if "k_bias" in name:
__lowercase = name.replace("k_bias" , "key.bias" )
if "v_bias" in name:
__lowercase = name.replace("v_bias" , "value.bias" )
if "cpb_mlp" in name:
__lowercase = name.replace("cpb_mlp" , "continuous_position_bias_mlp" )
if name == "norm.weight":
__lowercase = "layernorm.weight"
if name == "norm.bias":
__lowercase = "layernorm.bias"
if "head" in name:
__lowercase = name.replace("head" , "classifier" )
else:
__lowercase = "swinv2." + name
return name
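# Hedged illustration, not part of the original script: the expected
# end-to-end effect of the renaming chain above on two representative timm
# keys (assuming each `__lowercase` placeholder rebinds `name`, as the
# surrounding code implies).
_rename_examples = {
    "layers.0.blocks.0.attn.proj.weight": "swinv2.encoder.layers.0.blocks.0.attention.output.dense.weight",
    "head.weight": "classifier.weight",  # the classification head gets no "swinv2." prefix
}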
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for key in orig_state_dict.copy().keys():
__lowercase = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
if "mask" in key:
continue
elif "qkv" in key:
__lowercase = key.split("." )
__lowercase = int(key_split[1] )
__lowercase = int(key_split[3] )
__lowercase = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__lowercase = val[:dim, :]
__lowercase = val[dim : dim * 2, :]
__lowercase = val[-dim:, :]
else:
__lowercase = val[:dim]
__lowercase = val[
dim : dim * 2
]
__lowercase = val[-dim:]
else:
__lowercase = val
return orig_state_dict
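# Hedged sketch, not part of the original script: the "qkv" branch above
# slices timm's fused (3 * dim, in_dim) projection into equal
# query/key/value thirds. Tiny self-contained check with dim == 2:
def _qkv_split_demo():
    import torch

    dim = 2
    qkv = torch.arange(3 * dim * dim).reshape(3 * dim, dim)
    q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)
    return q, k, v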
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE )
timm_model.eval()
__lowercase = get_swinva_config(_SCREAMING_SNAKE_CASE )
__lowercase = SwinvaForImageClassification(_SCREAMING_SNAKE_CASE )
model.eval()
__lowercase = convert_state_dict(timm_model.state_dict() , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
__lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowercase = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_" , "-" ) ) )
__lowercase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
__lowercase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" )
__lowercase = timm_model(inputs["pixel_values"] )
__lowercase = model(**_SCREAMING_SNAKE_CASE ).logits
assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 )
print(F"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
model.push_to_hub(
repo_path_or_name=Path(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , organization="nandwalritik" , commit_message="Add model" , )
if __name__ == "__main__":
snake_case__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swinv2_name""",
default="""swinv2_tiny_patch4_window8_256""",
type=str,
help="""Name of the Swinv2 timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
snake_case__ : str = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 655 | 1 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : Optional[int] = logging.get_logger(__name__)
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = MobileNetVaConfig(layer_norm_eps=0.0_0_1 )
if "_quant" in model_name:
raise ValueError("Quantized models are not supported." )
__lowercase = re.match(R"^mobilenet_v1_([^_]*)_([^_]*)$" , _SCREAMING_SNAKE_CASE )
if matches:
__lowercase = float(matches[1] )
__lowercase = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__lowercase = 1_0_0_1
__lowercase = "imagenet-1k-id2label.json"
__lowercase = "huggingface/label-files"
__lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
__lowercase = {int(_SCREAMING_SNAKE_CASE ) + 1: v for k, v in idalabel.items()}
__lowercase = "background"
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
return config
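# Hedged illustration, not part of the original script: the regex above
# decodes the depth multiplier and input resolution from a checkpoint name
# such as "mobilenet_v1_1.0_224".
_demo_match = re.match(R"^mobilenet_v1_([^_]*)_([^_]*)$", "mobilenet_v1_1.0_224")
assert _demo_match is not None
assert float(_demo_match[1]) == 1.0 and int(_demo_match[2]) == 224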
def snake_case_ ( ):
__lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowercase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
__lowercase = get_mobilenet_va_config(_SCREAMING_SNAKE_CASE )
# Load 🤗 model
__lowercase = MobileNetVaForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__lowercase = MobileNetVaImageProcessor(
crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 3_2} , )
__lowercase = image_processor(images=prepare_img() , return_tensors="pt" )
__lowercase = model(**_SCREAMING_SNAKE_CASE )
__lowercase = outputs.logits
assert logits.shape == (1, 1_0_0_1)
if model_name == "mobilenet_v1_1.0_224":
__lowercase = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] )
elif model_name == "mobilenet_v1_0.75_192":
__lowercase = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] )
else:
__lowercase = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
print("Pushing to the hub..." )
__lowercase = "google/" + model_name
image_processor.push_to_hub(_SCREAMING_SNAKE_CASE )
model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
snake_case__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
snake_case__ : Dict = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 655 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
snake_case__ : List[str] = logging.get_logger(__name__)
snake_case__ : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
snake_case__ : Optional[Any] = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
snake_case__ : List[str] = {
"""allenai/led-base-16384""": 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def snake_case_ ( ):
__lowercase = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
__lowercase = bs[:]
__lowercase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_SCREAMING_SNAKE_CASE )
cs.append(2**8 + n )
n += 1
__lowercase = [chr(_SCREAMING_SNAKE_CASE ) for n in cs]
return dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
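# Hedged sketch, not part of the original tokenizer: an equivalent
# restatement of the byte-to-unicode mapping above. Printable bytes map to
# themselves; the rest are shifted past 0xFF so they stay printable (the
# space byte 32 becomes "Ġ", U+0120).
def _bytes_to_unicode_demo():
    printable = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    mapping, shift = {}, 0
    for b in range(2**8):
        if b in printable:
            mapping[b] = chr(b)
        else:
            mapping[b] = chr(2**8 + shift)
            shift += 1
    return mapping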
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = set()
__lowercase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowercase = char
return pairs
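# Hedged illustration, not part of the original tokenizer: the helper above
# collects the set of adjacent symbol bigrams, equivalently set(zip(w, w[1:])).
_w = ("l", "o", "w")
assert set(zip(_w, _w[1:])) == {("l", "o"), ("o", "w")}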
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : List[str] = VOCAB_FILES_NAMES
_snake_case : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Union[str, Any] = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , lowerCamelCase : Tuple , lowerCamelCase : Tuple , lowerCamelCase : Optional[int]="replace" , lowerCamelCase : Dict="<s>" , lowerCamelCase : Dict="</s>" , lowerCamelCase : Optional[Any]="</s>" , lowerCamelCase : Any="<s>" , lowerCamelCase : List[str]="<unk>" , lowerCamelCase : Union[str, Any]="<pad>" , lowerCamelCase : Any="<mask>" , lowerCamelCase : str=False , **lowerCamelCase : Optional[Any] , ):
'''simple docstring'''
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else bos_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else eos_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else sep_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else cls_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else unk_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
super().__init__(
errors=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , )
with open(lowerCamelCase , encoding="utf-8" ) as vocab_handle:
__lowercase = json.load(lowerCamelCase )
__lowercase = {v: k for k, v in self.encoder.items()}
__lowercase = errors # how to handle errors in decoding
__lowercase = bytes_to_unicode()
__lowercase = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase , encoding="utf-8" ) as merges_handle:
__lowercase = merges_handle.read().split("\n" )[1:-1]
__lowercase = [tuple(merge.split() ) for merge in bpe_merges]
__lowercase = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
__lowercase = {}
__lowercase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__lowercase = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
return len(self.encoder )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self : List[Any] , lowerCamelCase : str ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
__lowercase = tuple(lowerCamelCase )
__lowercase = get_pairs(lowerCamelCase )
if not pairs:
return token
while True:
__lowercase = min(lowerCamelCase , key=lambda lowerCamelCase : self.bpe_ranks.get(lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__lowercase , __lowercase = bigram
__lowercase = []
__lowercase = 0
while i < len(lowerCamelCase ):
try:
__lowercase = word.index(lowerCamelCase , lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowercase = j
if word[i] == first and i < len(lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowercase = tuple(lowerCamelCase )
__lowercase = new_word
if len(lowerCamelCase ) == 1:
break
else:
__lowercase = get_pairs(lowerCamelCase )
__lowercase = " ".join(lowerCamelCase )
__lowercase = word
return word
def _snake_case ( self : List[Any] , lowerCamelCase : Tuple ):
'''simple docstring'''
__lowercase = []
for token in re.findall(self.pat , lowerCamelCase ):
__lowercase = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase ).split(" " ) )
return bpe_tokens
def _snake_case ( self : Dict , lowerCamelCase : Optional[int] ):
'''simple docstring'''
return self.encoder.get(lowerCamelCase , self.encoder.get(self.unk_token ) )
def _snake_case ( self : str , lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return self.decoder.get(lowerCamelCase )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : int ):
'''simple docstring'''
__lowercase = "".join(lowerCamelCase )
__lowercase = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _snake_case ( self : Optional[Any] , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase = os.path.join(
lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__lowercase = os.path.join(
lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase , ensure_ascii=lowerCamelCase ) + "\n" )
__lowercase = 0
with open(lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
__lowercase = token_index
writer.write(" ".join(lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def _snake_case ( self : Tuple , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowercase = [self.cls_token_id]
__lowercase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self : str , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase )) + [1]
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]
def _snake_case ( self : int , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self : Dict , lowerCamelCase : Any , lowerCamelCase : Tuple=False , **lowerCamelCase : Any ):
'''simple docstring'''
__lowercase = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase ) > 0 and not text[0].isspace()):
__lowercase = " " + text
return (text, kwargs)
def _snake_case ( self : List[Any] , lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase : Optional[int] = None , lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , ):
'''simple docstring'''
__lowercase = super()._pad(
encoded_inputs=lowerCamelCase , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
__lowercase = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__lowercase = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__lowercase = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase )
if needs_to_be_padded:
__lowercase = len(lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__lowercase = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
__lowercase = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
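# Hedged illustration, not part of the original tokenizer: right-padding a
# LED `global_attention_mask` uses -1 rather than 0, because 0 already means
# "local attention" for real tokens.
assert [1, 0, 0] + [-1] * (5 - 3) == [1, 0, 0, -1, -1]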
| 655 | 1 |
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
    if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
        raise ValueError("The length of profit and weight must be the same." )
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero." )
    if any(p < 0 for p in profit ):
        raise ValueError("Profit cannot be negative." )
    if any(w < 0 for w in weight ):
        raise ValueError("Weight cannot be negative." )
    # Profit per unit of weight (profit/weight) for each item.
__lowercase = [p / w for p, w in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]
# Creating a copy of the list and sorting profit/weight in ascending order
__lowercase = sorted(_SCREAMING_SNAKE_CASE )
# declaring useful variables
__lowercase = len(_SCREAMING_SNAKE_CASE )
__lowercase = 0
__lowercase = 0
__lowercase = 0
    # Greedy loop: run until the knapsack is full (limit reaches max_weight) or all items are used.
while limit <= max_weight and i < length:
        # pick the item with the largest remaining profit/weight ratio
__lowercase = sorted_profit_by_weight[length - i - 1]
__lowercase = profit_by_weight.index(_SCREAMING_SNAKE_CASE )
        profit_by_weight[index] = -1  # mark this ratio as used so .index() skips it next time
        # check whether the whole item fits into the remaining capacity
if max_weight - limit >= weight[index]:
limit += weight[index]
            # take the whole item: the fraction weight[index] / weight[index] == 1
gain += 1 * profit[index]
else:
            # The item does not fit entirely, so take only the fraction that
            # fills the remaining capacity: (max_weight - limit) / weight[index].
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
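# Hedged worked example, not part of the original script: for profits
# [60, 100, 120], weights [10, 20, 30] and max_weight 50, the greedy above
# takes items 0 and 1 whole (ratios 6 and 5), then 20/30 of item 2 (ratio 4).
assert abs((60 + 100 + (20 / 30) * 120) - 240.0) < 1e-9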
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
snake_case__ : str = [int(x) for x in input("""Input profits separated by spaces: """).split()]
snake_case__ : str = [int(x) for x in input("""Input weights separated by spaces: """).split()]
snake_case__ : Optional[Any] = int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight)
| 655 |
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
    if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
        raise ValueError("The length of profit and weight must be the same." )
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero." )
    if any(p < 0 for p in profit ):
        raise ValueError("Profit cannot be negative." )
    if any(w < 0 for w in weight ):
        raise ValueError("Weight cannot be negative." )
    # Profit per unit of weight (profit/weight) for each item.
__lowercase = [p / w for p, w in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]
# Creating a copy of the list and sorting profit/weight in ascending order
__lowercase = sorted(_SCREAMING_SNAKE_CASE )
# declaring useful variables
__lowercase = len(_SCREAMING_SNAKE_CASE )
__lowercase = 0
__lowercase = 0
__lowercase = 0
    # Greedy loop: run until the knapsack is full (limit reaches max_weight) or all items are used.
while limit <= max_weight and i < length:
        # pick the item with the largest remaining profit/weight ratio
__lowercase = sorted_profit_by_weight[length - i - 1]
__lowercase = profit_by_weight.index(_SCREAMING_SNAKE_CASE )
        profit_by_weight[index] = -1  # mark this ratio as used so .index() skips it next time
        # check whether the whole item fits into the remaining capacity
if max_weight - limit >= weight[index]:
limit += weight[index]
            # take the whole item: the fraction weight[index] / weight[index] == 1
gain += 1 * profit[index]
else:
            # The item does not fit entirely, so take only the fraction that
            # fills the remaining capacity: (max_weight - limit) / weight[index].
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
snake_case__ : str = [int(x) for x in input("""Input profits separated by spaces: """).split()]
snake_case__ : str = [int(x) for x in input("""Input weights separated by spaces: """).split()]
snake_case__ : Optional[Any] = int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight)
| 655 | 1 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1E-1_2 ):
__lowercase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_SCREAMING_SNAKE_CASE , axis=1 ) , a_min=_SCREAMING_SNAKE_CASE ) ).T
__lowercase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_SCREAMING_SNAKE_CASE , axis=1 ) , a_min=_SCREAMING_SNAKE_CASE ) ).T
return jnp.matmul(_SCREAMING_SNAKE_CASE , norm_emb_a.T )
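# Hedged illustration, not part of the original module: the helper above
# L2-normalizes both embedding batches and returns their cosine-similarity
# matrix, so a vector compared with itself scores 1.
def _cosine_demo():
    v = jnp.array([[3.0, 4.0]])
    v_unit = v / jnp.linalg.norm(v, axis=1, keepdims=True)
    return jnp.allclose(jnp.matmul(v_unit, v_unit.T), 1.0)  # Array(True)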
class _A ( nn.Module ):
'''simple docstring'''
_snake_case : CLIPConfig
_snake_case : jnp.dtype = jnp.floataa
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = FlaxCLIPVisionModule(self.config.vision_config )
__lowercase = nn.Dense(self.config.projection_dim , use_bias=lowerCamelCase , dtype=self.dtype )
__lowercase = self.param("concept_embeds" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
__lowercase = self.param(
"special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) )
__lowercase = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (17,) )
__lowercase = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) )
def __call__( self : str , lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__lowercase = self.vision_model(lowerCamelCase )[1]
__lowercase = self.visual_projection(lowerCamelCase )
__lowercase = jax_cosine_distance(lowerCamelCase , self.special_care_embeds )
__lowercase = jax_cosine_distance(lowerCamelCase , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
__lowercase = 0.0
__lowercase = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
__lowercase = jnp.round(lowerCamelCase , 3 )
__lowercase = jnp.any(special_scores > 0 , axis=1 , keepdims=lowerCamelCase )
# Use a lower threshold if an image has any special care concept
__lowercase = is_special_care * 0.01
__lowercase = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
__lowercase = jnp.round(lowerCamelCase , 3 )
__lowercase = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : str = CLIPConfig
_snake_case : Optional[int] = """clip_input"""
_snake_case : Optional[Any] = FlaxStableDiffusionSafetyCheckerModule
def __init__( self : Tuple , lowerCamelCase : CLIPConfig , lowerCamelCase : Optional[Tuple] = None , lowerCamelCase : int = 0 , lowerCamelCase : jnp.dtype = jnp.floataa , lowerCamelCase : bool = True , **lowerCamelCase : List[Any] , ):
'''simple docstring'''
if input_shape is None:
__lowercase = (1, 224, 224, 3)
__lowercase = self.module_class(config=lowerCamelCase , dtype=lowerCamelCase , **lowerCamelCase )
super().__init__(lowerCamelCase , lowerCamelCase , input_shape=lowerCamelCase , seed=lowerCamelCase , dtype=lowerCamelCase , _do_init=_do_init )
def _snake_case ( self : List[Any] , lowerCamelCase : jax.random.KeyArray , lowerCamelCase : Tuple , lowerCamelCase : FrozenDict = None ):
'''simple docstring'''
__lowercase = jax.random.normal(lowerCamelCase , lowerCamelCase )
__lowercase , __lowercase = jax.random.split(lowerCamelCase )
__lowercase = {"params": params_rng, "dropout": dropout_rng}
__lowercase = self.module.init(lowerCamelCase , lowerCamelCase )["params"]
return random_params
def __call__( self : Optional[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : dict = None , ):
'''simple docstring'''
__lowercase = jnp.transpose(lowerCamelCase , (0, 2, 3, 1) )
return self.module.apply(
{"params": params or self.params} , jnp.array(lowerCamelCase , dtype=jnp.floataa ) , rngs={} , )
| 655 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Dict = """openai/whisper-base"""
_snake_case : Union[str, Any] = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
_snake_case : Any = """transcriber"""
_snake_case : Any = WhisperProcessor
_snake_case : Optional[int] = WhisperForConditionalGeneration
_snake_case : str = ["""audio"""]
_snake_case : Optional[int] = ["""text"""]
def _snake_case ( self : List[str] , lowerCamelCase : Optional[int] ):
'''simple docstring'''
return self.pre_processor(lowerCamelCase , return_tensors="pt" ).input_features
def _snake_case ( self : str , lowerCamelCase : List[Any] ):
'''simple docstring'''
return self.model.generate(inputs=lowerCamelCase )
def _snake_case ( self : List[str] , lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return self.pre_processor.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )[0]
| 655 | 1 |
import numpy as np
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
return 1 / (1 + np.exp(-vector ))
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
return vector * sigmoid(_SCREAMING_SNAKE_CASE )
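# Hedged illustration, not part of the original module: sigmoid(0) == 0.5,
# so the SiLU ("sigmoid linear unit") above satisfies silu(0) == 0; it
# approaches x for large x and 0 for very negative x.
assert 1 / (1 + np.exp(-0.0)) == 0.5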
if __name__ == "__main__":
import doctest
doctest.testmod()
| 655 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class _A :
'''simple docstring'''
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
__lowercase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
__lowercase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _snake_case ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
__lowercase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
__lowercase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
__lowercase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = inputs["prompt"]
__lowercase = inputs["generator"]
__lowercase = inputs["num_inference_steps"]
__lowercase = inputs["output_type"]
if "image" in inputs:
__lowercase = inputs["image"]
else:
__lowercase = None
if "mask_image" in inputs:
__lowercase = inputs["mask_image"]
else:
__lowercase = None
if "original_image" in inputs:
__lowercase = inputs["original_image"]
else:
__lowercase = None
__lowercase , __lowercase = pipe.encode_prompt(lowerCamelCase )
# inputs with prompt converted to embeddings
__lowercase = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
__lowercase = image
if mask_image is not None:
__lowercase = mask_image
if original_image is not None:
__lowercase = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(lowerCamelCase , lowerCamelCase , lowerCamelCase )
__lowercase = pipe(**lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCamelCase )
__lowercase = self.pipeline_class.from_pretrained(lowerCamelCase )
pipe_loaded.to(lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=lowerCamelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowerCamelCase , lowerCamelCase ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , )
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = inputs["generator"]
__lowercase = inputs["num_inference_steps"]
__lowercase = inputs["output_type"]
# inputs with prompt converted to embeddings
__lowercase = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
__lowercase = image
if mask_image is not None:
__lowercase = mask_image
if original_image is not None:
__lowercase = original_image
__lowercase = pipe_loaded(**lowerCamelCase )[0]
__lowercase = np.abs(to_np(lowerCamelCase ) - to_np(lowerCamelCase ) ).max()
self.assertLess(lowerCamelCase , 1e-4 )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = pipe(**lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCamelCase )
__lowercase = self.pipeline_class.from_pretrained(lowerCamelCase )
pipe_loaded.to(lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=lowerCamelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = pipe_loaded(**lowerCamelCase )[0]
__lowercase = np.abs(to_np(lowerCamelCase ) - to_np(lowerCamelCase ) ).max()
self.assertLess(lowerCamelCase , 1e-4 )
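# Hedged illustration, not part of the original tests: the save/load
# round-trips above compare outputs by their maximum absolute difference
# against a 1e-4 tolerance.
def _max_abs_diff_demo():
    a, b = np.array([1.0, 2.0]), np.array([1.0, 2.00005])
    return float(np.abs(a - b).max())  # 5e-05, which passes the 1e-4 check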
| 655 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Dict = """openai/whisper-base"""
_snake_case : Union[str, Any] = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
_snake_case : Any = """transcriber"""
_snake_case : Any = WhisperProcessor
_snake_case : Optional[int] = WhisperForConditionalGeneration
_snake_case : str = ["""audio"""]
_snake_case : Optional[int] = ["""text"""]
def _snake_case ( self : List[str] , lowerCamelCase : Optional[int] ):
'''simple docstring'''
return self.pre_processor(lowerCamelCase , return_tensors="pt" ).input_features
def _snake_case ( self : str , lowerCamelCase : List[Any] ):
'''simple docstring'''
return self.model.generate(inputs=lowerCamelCase )
def _snake_case ( self : List[str] , lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return self.pre_processor.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )[0]
| 655 |
import numpy as np
snake_case__ : Tuple = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
class _A :
'''simple docstring'''
def __init__( self : Dict ):
'''simple docstring'''
__lowercase = np.array(lowerCamelCase )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : str ):
'''simple docstring'''
__lowercase , __lowercase = np.where(letter == self.SQUARE )
__lowercase = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def _snake_case ( self : List[Any] , lowerCamelCase : int , lowerCamelCase : int ):
'''simple docstring'''
__lowercase = self.SQUARE[indexa - 1, indexa - 1]
return letter
def _snake_case ( self : int , lowerCamelCase : str ):
'''simple docstring'''
__lowercase = message.lower()
__lowercase = message.replace(" " , "" )
__lowercase = message.replace("j" , "i" )
__lowercase = np.empty((2, len(lowerCamelCase )) )
for letter_index in range(len(lowerCamelCase ) ):
__lowercase = self.letter_to_numbers(message[letter_index] )
__lowercase = numbers[0]
__lowercase = numbers[1]
__lowercase = first_step.reshape(2 * len(lowerCamelCase ) )
__lowercase = ""
for numbers_index in range(len(lowerCamelCase ) ):
__lowercase = int(second_step[numbers_index * 2] )
__lowercase = int(second_step[(numbers_index * 2) + 1] )
__lowercase = self.numbers_to_letter(lowerCamelCase , lowerCamelCase )
__lowercase = encoded_message + letter
return encoded_message
def _snake_case ( self : Optional[Any] , lowerCamelCase : str ):
'''simple docstring'''
__lowercase = message.lower()
message.replace(" " , "" )
__lowercase = np.empty(2 * len(lowerCamelCase ) )
for letter_index in range(len(lowerCamelCase ) ):
__lowercase = self.letter_to_numbers(message[letter_index] )
__lowercase = numbers[0]
__lowercase = numbers[1]
__lowercase = first_step.reshape((2, len(lowerCamelCase )) )
__lowercase = ""
for numbers_index in range(len(lowerCamelCase ) ):
__lowercase = int(second_step[0, numbers_index] )
__lowercase = int(second_step[1, numbers_index] )
__lowercase = self.numbers_to_letter(lowerCamelCase , lowerCamelCase )
__lowercase = decoded_message + letter
return decoded_message
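# Hedged illustration, not part of the original module: in the 5x5 Polybius
# square above, "b" sits at row 1, column 2 (1-indexed); "j" is folded into
# "i" during encoding so the 25 cells cover the 26-letter alphabet.
assert snake_case__[0][1] == "b"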
| 655 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case__ : Any = logging.get_logger(__name__)
class _A ( _lowercase , _lowercase ):
'''simple docstring'''
_snake_case : Dict = """maskformer-swin"""
_snake_case : List[str] = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : List[str] , lowerCamelCase : Any=224 , lowerCamelCase : Optional[Any]=4 , lowerCamelCase : Dict=3 , lowerCamelCase : Tuple=96 , lowerCamelCase : str=[2, 2, 6, 2] , lowerCamelCase : Dict=[3, 6, 12, 24] , lowerCamelCase : Optional[Any]=7 , lowerCamelCase : Any=4.0 , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : List[str]=0.0 , lowerCamelCase : Optional[int]=0.0 , lowerCamelCase : List[str]=0.1 , lowerCamelCase : int="gelu" , lowerCamelCase : Optional[int]=False , lowerCamelCase : List[Any]=0.02 , lowerCamelCase : Tuple=1e-5 , lowerCamelCase : Dict=None , lowerCamelCase : Dict=None , **lowerCamelCase : int , ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = embed_dim
__lowercase = depths
__lowercase = len(lowerCamelCase )
__lowercase = num_heads
__lowercase = window_size
__lowercase = mlp_ratio
__lowercase = qkv_bias
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = drop_path_rate
__lowercase = hidden_act
__lowercase = use_absolute_embeddings
__lowercase = layer_norm_eps
__lowercase = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__lowercase = int(embed_dim * 2 ** (len(lowerCamelCase ) - 1) )
__lowercase = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase ) + 1 )]
__lowercase , __lowercase = get_aligned_output_features_output_indices(
out_features=lowerCamelCase , out_indices=lowerCamelCase , stage_names=self.stage_names )
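# Hedged illustration, not part of the original config: with the defaults
# above (embed_dim=96, depths=[2, 2, 6, 2]), the channel dimension after the
# last stage is 96 * 2 ** 3 == 768.
assert 96 * 2 ** (len([2, 2, 6, 2]) - 1) == 768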
| 655 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class _A ( ctypes.Structure ):
'''simple docstring'''
_snake_case : Optional[Any] = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)]
def snake_case_ ( ):
if os.name == "nt":
__lowercase = CursorInfo()
__lowercase = ctypes.windll.kernelaa.GetStdHandle(-1_1 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) )
__lowercase = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) )
elif os.name == "posix":
sys.stdout.write("\033[?25l" )
sys.stdout.flush()
def snake_case_ ( ):
if os.name == "nt":
__lowercase = CursorInfo()
__lowercase = ctypes.windll.kernelaa.GetStdHandle(-1_1 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) )
__lowercase = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) )
elif os.name == "posix":
sys.stdout.write("\033[?25h" )
sys.stdout.flush()
@contextmanager
def snake_case_ ( ):
try:
hide_cursor()
yield
finally:
show_cursor()
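# Hedged usage sketch, not part of the original module: the @contextmanager
# above pairs the DEC private mode 25 escapes ("\033[?25l" hides the cursor,
# "\033[?25h" shows it), and the try/finally restores the cursor even if the
# wrapped block raises. Here `snake_case_` is the context manager defined
# just above; the `hide_cursor`/`show_cursor` calls inside it refer to the
# two helpers whose defs were renamed.
def _hidden_cursor_demo():
    with snake_case_():
        print("cursor hidden while this line prints")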
| 655 | 1 |
import json
import sys
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
with open(_SCREAMING_SNAKE_CASE , encoding="utf-8" ) as f:
__lowercase = json.load(_SCREAMING_SNAKE_CASE )
__lowercase = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
for benchmark_name in sorted(_SCREAMING_SNAKE_CASE ):
__lowercase = results[benchmark_name]
__lowercase = benchmark_name.split("/" )[-1]
output_md.append(F"""### Benchmark: {benchmark_file_name}""" )
__lowercase = "| metric |"
__lowercase = "|--------|"
__lowercase = "| new / old (diff) |"
for metric_name in sorted(_SCREAMING_SNAKE_CASE ):
__lowercase = benchmark_res[metric_name]
__lowercase = metric_vals["new"]
__lowercase = metric_vals.get("old" , _SCREAMING_SNAKE_CASE )
__lowercase = metric_vals.get("diff" , _SCREAMING_SNAKE_CASE )
__lowercase = F""" {new_val:f}""" if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None"
if old_val is not None:
val_str += F""" / {old_val:f}""" if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None"
if dif_val is not None:
val_str += F""" ({dif_val:f})""" if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("</details>" )
with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.writelines("\n".join(_SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
snake_case__ : List[str] = sys.argv[1]
snake_case__ : Optional[Any] = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
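# Sketch of the nested JSON shape the formatter above consumes: each
# benchmark maps metric names to a dict with a required "new" entry and
# optional "old"/"diff" entries. File name and values are hypothetical:
import json

sample = {
    "benchmarks/read.json": {
        "time_s": {"new": 1.23, "old": 1.50, "diff": -0.27},
        "throughput": {"new": 812.0},  # "old" and "diff" may be absent
    }
}
with open("sample_results.json", "w", encoding="utf-8") as f:
    json.dump(sample, f)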
| 655 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case__ : List[Any] = logging.get_logger(__name__)
snake_case__ : List[str] = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : List[Any] = """yolos"""
def __init__( self : Union[str, Any] , lowerCamelCase : Union[str, Any]=768 , lowerCamelCase : int=12 , lowerCamelCase : Union[str, Any]=12 , lowerCamelCase : Optional[Any]=3_072 , lowerCamelCase : Optional[int]="gelu" , lowerCamelCase : Dict=0.0 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : Any=0.02 , lowerCamelCase : Optional[Any]=1e-12 , lowerCamelCase : Optional[Any]=[512, 864] , lowerCamelCase : str=16 , lowerCamelCase : Dict=3 , lowerCamelCase : str=True , lowerCamelCase : List[Any]=100 , lowerCamelCase : Dict=True , lowerCamelCase : Dict=False , lowerCamelCase : List[str]=1 , lowerCamelCase : str=5 , lowerCamelCase : Any=2 , lowerCamelCase : str=5 , lowerCamelCase : Optional[int]=2 , lowerCamelCase : List[Any]=0.1 , **lowerCamelCase : List[Any] , ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = qkv_bias
__lowercase = num_detection_tokens
__lowercase = use_mid_position_embeddings
__lowercase = auxiliary_loss
# Hungarian matcher
__lowercase = class_cost
__lowercase = bbox_cost
__lowercase = giou_cost
# Loss coefficients
__lowercase = bbox_loss_coefficient
__lowercase = giou_loss_coefficient
__lowercase = eos_coefficient
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Dict = version.parse("""1.11""" )
@property
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _snake_case ( self : str ):
'''simple docstring'''
return 1e-4
@property
def _snake_case ( self : Tuple ):
'''simple docstring'''
return 12
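# For reference, the upstream configuration this file mirrors is exposed as
# `YolosConfig` in `transformers`; a minimal instantiation, assuming the
# package with YOLOS support is installed:
from transformers import YolosConfig

config = YolosConfig(image_size=[512, 864], num_detection_tokens=100)
print(config.hidden_size)  # 768 by default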
| 655 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
snake_case__ : int = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Tuple = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Union[str, Any] = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Tuple = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
snake_case__ : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
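# What the lazy import machinery above ultimately exposes; a typical use of
# the PyTorch branch (assumes `transformers`, a torch install, and access to
# the Hugging Face Hub checkpoints):
from transformers import EncoderDecoderModel

model = EncoderDecoderModel.from_encoder_decoder_pretrained(
    "bert-base-uncased", "bert-base-uncased"
)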
| 655 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : Optional[int] = logging.get_logger(__name__)
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = MobileNetVaConfig(layer_norm_eps=0.0_0_1 )
if "_quant" in model_name:
raise ValueError("Quantized models are not supported." )
__lowercase = re.match(R"^mobilenet_v1_([^_]*)_([^_]*)$" , _SCREAMING_SNAKE_CASE )
if matches:
__lowercase = float(matches[1] )
__lowercase = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__lowercase = 1_0_0_1
__lowercase = "imagenet-1k-id2label.json"
__lowercase = "huggingface/label-files"
__lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
__lowercase = {int(_SCREAMING_SNAKE_CASE ) + 1: v for k, v in idalabel.items()}
__lowercase = "background"
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
return config
def snake_case_ ( ):
__lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowercase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
__lowercase = get_mobilenet_va_config(_SCREAMING_SNAKE_CASE )
# Load 🤗 model
__lowercase = MobileNetVaForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__lowercase = MobileNetVaImageProcessor(
crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 3_2} , )
__lowercase = image_processor(images=prepare_img() , return_tensors="pt" )
__lowercase = model(**_SCREAMING_SNAKE_CASE )
__lowercase = outputs.logits
assert logits.shape == (1, 1_0_0_1)
if model_name == "mobilenet_v1_1.0_224":
__lowercase = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] )
elif model_name == "mobilenet_v1_0.75_192":
__lowercase = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] )
else:
__lowercase = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
print("Pushing to the hub..." )
__lowercase = "google/" + model_name
image_processor.push_to_hub(_SCREAMING_SNAKE_CASE )
model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
snake_case__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
        help="""Name of the MobileNetV1 model you'd like to convert. Should be in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
snake_case__ : Dict = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
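# The depth/size parsing used by the conversion above, isolated for clarity:
import re

match = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", "mobilenet_v1_1.0_224")
assert match is not None
depth_multiplier, image_size = float(match.group(1)), int(match.group(2))
print(depth_multiplier, image_size)  # 1.0 224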
| 655 | 1 |
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
def get_masked_lm_array(_SCREAMING_SNAKE_CASE ):
__lowercase = F"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__lowercase = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if "kernel" in name:
__lowercase = array.transpose()
return torch.from_numpy(_SCREAMING_SNAKE_CASE )
def get_encoder_array(_SCREAMING_SNAKE_CASE ):
__lowercase = F"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__lowercase = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if "kernel" in name:
__lowercase = array.transpose()
return torch.from_numpy(_SCREAMING_SNAKE_CASE )
def get_encoder_layer_array(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = F"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__lowercase = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if "kernel" in name:
__lowercase = array.transpose()
return torch.from_numpy(_SCREAMING_SNAKE_CASE )
def get_encoder_attention_layer_array(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = F"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__lowercase = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowercase = array.reshape(_SCREAMING_SNAKE_CASE )
if "kernel" in name:
__lowercase = array.transpose()
return torch.from_numpy(_SCREAMING_SNAKE_CASE )
print(F"""Loading model based on config from {config_path}...""" )
__lowercase = BertConfig.from_json_file(_SCREAMING_SNAKE_CASE )
__lowercase = BertForMaskedLM(_SCREAMING_SNAKE_CASE )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
__lowercase = model.bert.encoder.layer[layer_index]
# Self-attention
__lowercase = layer.attention.self
__lowercase = get_encoder_attention_layer_array(
_SCREAMING_SNAKE_CASE , "_query_dense/kernel" , self_attn.query.weight.data.shape )
__lowercase = get_encoder_attention_layer_array(
_SCREAMING_SNAKE_CASE , "_query_dense/bias" , self_attn.query.bias.data.shape )
__lowercase = get_encoder_attention_layer_array(
_SCREAMING_SNAKE_CASE , "_key_dense/kernel" , self_attn.key.weight.data.shape )
__lowercase = get_encoder_attention_layer_array(
_SCREAMING_SNAKE_CASE , "_key_dense/bias" , self_attn.key.bias.data.shape )
__lowercase = get_encoder_attention_layer_array(
_SCREAMING_SNAKE_CASE , "_value_dense/kernel" , self_attn.value.weight.data.shape )
__lowercase = get_encoder_attention_layer_array(
_SCREAMING_SNAKE_CASE , "_value_dense/bias" , self_attn.value.bias.data.shape )
# Self-attention Output
__lowercase = layer.attention.output
__lowercase = get_encoder_attention_layer_array(
_SCREAMING_SNAKE_CASE , "_output_dense/kernel" , self_output.dense.weight.data.shape )
__lowercase = get_encoder_attention_layer_array(
_SCREAMING_SNAKE_CASE , "_output_dense/bias" , self_output.dense.bias.data.shape )
__lowercase = get_encoder_layer_array(_SCREAMING_SNAKE_CASE , "_attention_layer_norm/gamma" )
__lowercase = get_encoder_layer_array(_SCREAMING_SNAKE_CASE , "_attention_layer_norm/beta" )
# Intermediate
__lowercase = layer.intermediate
__lowercase = get_encoder_layer_array(_SCREAMING_SNAKE_CASE , "_intermediate_dense/kernel" )
__lowercase = get_encoder_layer_array(_SCREAMING_SNAKE_CASE , "_intermediate_dense/bias" )
# Output
__lowercase = layer.output
__lowercase = get_encoder_layer_array(_SCREAMING_SNAKE_CASE , "_output_dense/kernel" )
__lowercase = get_encoder_layer_array(_SCREAMING_SNAKE_CASE , "_output_dense/bias" )
__lowercase = get_encoder_layer_array(_SCREAMING_SNAKE_CASE , "_output_layer_norm/gamma" )
__lowercase = get_encoder_layer_array(_SCREAMING_SNAKE_CASE , "_output_layer_norm/beta" )
# Embeddings
__lowercase = get_encoder_array("_position_embedding_layer/embeddings" )
__lowercase = get_encoder_array("_type_embedding_layer/embeddings" )
__lowercase = get_encoder_array("_embedding_norm_layer/gamma" )
__lowercase = get_encoder_array("_embedding_norm_layer/beta" )
# LM Head
__lowercase = model.cls.predictions.transform
__lowercase = get_masked_lm_array("dense/kernel" )
__lowercase = get_masked_lm_array("dense/bias" )
__lowercase = get_masked_lm_array("layer_norm/gamma" )
__lowercase = get_masked_lm_array("layer_norm/beta" )
__lowercase = get_masked_lm_array("embedding_table" )
# Pooling
__lowercase = BertPooler(config=_SCREAMING_SNAKE_CASE )
__lowercase = get_encoder_array("_pooler_layer/kernel" )
__lowercase = get_encoder_array("_pooler_layer/bias" )
# Export final model
model.save_pretrained(_SCREAMING_SNAKE_CASE )
# Integration test - should load without any errors ;)
__lowercase = BertForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE )
print(new_model.eval() )
print("Model conversion was done sucessfully!" )
if __name__ == "__main__":
snake_case__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
snake_case__ : Any = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
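# Why the "kernel" arrays above are transposed: TF dense kernels are stored
# (in_features, out_features), while torch.nn.Linear.weight is
# (out_features, in_features). A minimal illustration with placeholder
# shapes (assumes numpy and torch are installed):
import numpy as np
import torch as _torch

tf_kernel = np.zeros((768, 3072), dtype=np.float32)   # TF layout: (in, out)
linear = _torch.nn.Linear(768, 3072)
linear.weight.data = _torch.from_numpy(tf_kernel.transpose().copy())
print(tuple(linear.weight.shape))  # (3072, 768)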
| 655 |
from __future__ import annotations
from typing import Any
class _A :
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCamelCase : int ):
'''simple docstring'''
__lowercase = num_of_nodes
__lowercase = []
__lowercase = {}
def _snake_case ( self : Dict , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int ):
'''simple docstring'''
self.m_edges.append([u_node, v_node, weight] )
def _snake_case ( self : List[Any] , lowerCamelCase : int ):
'''simple docstring'''
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : int ):
'''simple docstring'''
if self.m_component[u_node] != u_node:
for k in self.m_component:
__lowercase = self.find_component(lowerCamelCase )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : list[int] , lowerCamelCase : int , lowerCamelCase : int ):
'''simple docstring'''
if component_size[u_node] <= component_size[v_node]:
__lowercase = v_node
component_size[v_node] += component_size[u_node]
self.set_component(lowerCamelCase )
elif component_size[u_node] >= component_size[v_node]:
__lowercase = self.find_component(lowerCamelCase )
component_size[u_node] += component_size[v_node]
self.set_component(lowerCamelCase )
def _snake_case ( self : Any ):
'''simple docstring'''
__lowercase = []
__lowercase = 0
__lowercase = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
__lowercase = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
__lowercase , __lowercase , __lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
__lowercase = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase , __lowercase , __lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(lowerCamelCase , lowerCamelCase , lowerCamelCase )
print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
__lowercase = [-1] * self.m_num_of_nodes
print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def snake_case_ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
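# The component lookup in the Boruvka implementation above is a plain
# disjoint-set "find" that walks parent pointers to the root; a compact
# standalone equivalent:
def find(parent: dict, node: int) -> int:
    while parent[node] != node:
        node = parent[node]
    return node

parent = {0: 0, 1: 0, 2: 2, 3: 2}
assert find(parent, 1) == 0 and find(parent, 3) == 2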
| 655 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class _A ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Dict , lowerCamelCase : List[str] , lowerCamelCase : Union[str, Any]=7 , lowerCamelCase : List[str]=3 , lowerCamelCase : Union[str, Any]=30 , lowerCamelCase : Optional[int]=400 , lowerCamelCase : Dict=True , lowerCamelCase : str=None , lowerCamelCase : List[Any]=True , lowerCamelCase : Optional[int]=[0.5, 0.5, 0.5] , lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , lowerCamelCase : Optional[Any]=True , lowerCamelCase : Any=1 / 255 , lowerCamelCase : str=True , ):
'''simple docstring'''
__lowercase = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = min_resolution
__lowercase = max_resolution
__lowercase = do_resize
__lowercase = size
__lowercase = do_normalize
__lowercase = image_mean
__lowercase = image_std
__lowercase = do_rescale
__lowercase = rescale_factor
__lowercase = do_pad
def _snake_case ( self : str ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _snake_case ( self : List[str] , lowerCamelCase : Union[str, Any] , lowerCamelCase : str=False ):
'''simple docstring'''
if not batched:
__lowercase = image_inputs[0]
if isinstance(lowerCamelCase , Image.Image ):
__lowercase , __lowercase = image.size
else:
__lowercase , __lowercase = image.shape[1], image.shape[2]
if w < h:
__lowercase = int(self.size["shortest_edge"] * h / w )
__lowercase = self.size["shortest_edge"]
elif w > h:
__lowercase = self.size["shortest_edge"]
__lowercase = int(self.size["shortest_edge"] * w / h )
else:
__lowercase = self.size["shortest_edge"]
__lowercase = self.size["shortest_edge"]
else:
__lowercase = []
for image in image_inputs:
__lowercase , __lowercase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__lowercase = max(lowerCamelCase , key=lambda lowerCamelCase : item[0] )[0]
__lowercase = max(lowerCamelCase , key=lambda lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _A ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_snake_case : Tuple = DetaImageProcessor if is_vision_available() else None
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowercase = DetaImageProcessingTester(self )
@property
def _snake_case ( self : str ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(lowerCamelCase , "size" ) )
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} )
self.assertEqual(image_processor.do_pad , lowerCamelCase )
def _snake_case ( self : List[str] ):
'''simple docstring'''
pass
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__lowercase , __lowercase = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowercase , __lowercase = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
__lowercase = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__lowercase , __lowercase = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowercase = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
__lowercase , __lowercase = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self : Dict ):
'''simple docstring'''
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__lowercase , __lowercase = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowercase = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
__lowercase , __lowercase = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _snake_case ( self : Any ):
'''simple docstring'''
__lowercase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
__lowercase = json.loads(f.read() )
__lowercase = {"image_id": 39_769, "annotations": target}
# encode them
__lowercase = DetaImageProcessor()
__lowercase = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , return_tensors="pt" )
# verify pixel values
__lowercase = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
__lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1e-4 ) )
# verify area
__lowercase = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
__lowercase = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
__lowercase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1e-3 ) )
# verify image_id
__lowercase = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
__lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
__lowercase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify orig_size
__lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
__lowercase = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
@slow
def _snake_case ( self : Any ):
'''simple docstring'''
__lowercase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
__lowercase = json.loads(f.read() )
__lowercase = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
__lowercase = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
__lowercase = DetaImageProcessor(format="coco_panoptic" )
__lowercase = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , masks_path=lowerCamelCase , return_tensors="pt" )
# verify pixel values
__lowercase = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
__lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1e-4 ) )
# verify area
__lowercase = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
__lowercase = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
__lowercase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1e-3 ) )
# verify image_id
__lowercase = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
__lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
__lowercase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify masks
__lowercase = 822_873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCamelCase )
# verify orig_size
__lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
__lowercase = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
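# The expected-size computation used by the tester above, isolated; it is
# the standard "shortest edge" resize rule returning (height, width). The
# name `expected_hw` is illustrative:
def expected_hw(w: int, h: int, shortest: int = 18) -> tuple:
    if w < h:
        return int(shortest * h / w), shortest
    if w > h:
        return shortest, int(shortest * w / h)
    return shortest, shortest

print(expected_hw(400, 300))  # (18, 24)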
| 655 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ : List[str] = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Dict = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
snake_case__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 655 | 1 |
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = 0
while num > 0:
digit_sum += num % 1_0
num //= 1_0
return digit_sum
def snake_case_ ( _SCREAMING_SNAKE_CASE = 1_0_0 ):
__lowercase = 1
__lowercase = 2
for i in range(2 , max_n + 1 ):
__lowercase = pre_numerator
__lowercase = 2 * i // 3 if i % 3 == 0 else 1
__lowercase = cur_numerator
__lowercase = e_cont * pre_numerator + temp
return sum_digits(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(F'''{solution() = }''')
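# Sanity check of the recurrence above: e = [2; 1, 2, 1, 1, 4, ...], with
# partial denominator 2*i//3 at every third position, so the convergent
# numerators begin 2, 3, 8, 11, 19, 87:
prev, cur = 1, 2
nums = [cur]
for i in range(2, 7):
    a = 2 * i // 3 if i % 3 == 0 else 1
    prev, cur = cur, a * cur + prev
    nums.append(cur)
print(nums)  # [2, 3, 8, 11, 19, 87]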
| 655 |
from __future__ import annotations
import bisect
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ):
if hi < 0:
__lowercase = len(_SCREAMING_SNAKE_CASE )
while lo < hi:
__lowercase = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__lowercase = mid + 1
else:
__lowercase = mid
return lo
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ):
if hi < 0:
__lowercase = len(_SCREAMING_SNAKE_CASE )
while lo < hi:
__lowercase = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__lowercase = mid + 1
else:
__lowercase = mid
return lo
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ):
sorted_collection.insert(bisect_left(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ):
sorted_collection.insert(bisect_right(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = 0
__lowercase = len(_SCREAMING_SNAKE_CASE ) - 1
while left <= right:
__lowercase = left + (right - left) // 2
__lowercase = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__lowercase = midpoint - 1
else:
__lowercase = midpoint + 1
return None
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = bisect.bisect_left(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if index != len(_SCREAMING_SNAKE_CASE ) and sorted_collection[index] == item:
return index
return None
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if right < left:
return None
__lowercase = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , midpoint - 1 )
else:
return binary_search_by_recursion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , midpoint + 1 , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
snake_case__ : Optional[Any] = input("""Enter numbers separated by comma:\n""").strip()
snake_case__ : Any = sorted(int(item) for item in user_input.split(""","""))
snake_case__ : Any = int(input("""Enter a single number to be found in the list:\n"""))
snake_case__ : List[Any] = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
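# The left/right variants above differ only in `<` versus `<=`; with
# duplicates that is the difference between the first matching index and
# one past the last, as the standard library confirms:
import bisect

data = [1, 2, 2, 2, 3]
print(bisect.bisect_left(data, 2), bisect.bisect_right(data, 2))  # 1 4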
| 655 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
snake_case__ : str = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
snake_case__ : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 655 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
snake_case__ : int = logging.get_logger(__name__)
snake_case__ : Optional[int] = {
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Dict = """conditional_detr"""
_snake_case : Union[str, Any] = ["""past_key_values"""]
_snake_case : Optional[int] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Optional[Any] , lowerCamelCase : int=True , lowerCamelCase : Tuple=None , lowerCamelCase : Optional[int]=3 , lowerCamelCase : Optional[int]=300 , lowerCamelCase : List[Any]=6 , lowerCamelCase : str=2_048 , lowerCamelCase : Any=8 , lowerCamelCase : List[str]=6 , lowerCamelCase : Any=2_048 , lowerCamelCase : List[Any]=8 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : List[str]=0.0 , lowerCamelCase : List[Any]=True , lowerCamelCase : str="relu" , lowerCamelCase : int=256 , lowerCamelCase : Dict=0.1 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : Dict=0.0 , lowerCamelCase : Tuple=0.02 , lowerCamelCase : int=1.0 , lowerCamelCase : Tuple=False , lowerCamelCase : List[str]="sine" , lowerCamelCase : List[Any]="resnet50" , lowerCamelCase : Any=True , lowerCamelCase : Any=False , lowerCamelCase : List[Any]=2 , lowerCamelCase : List[Any]=5 , lowerCamelCase : str=2 , lowerCamelCase : Dict=1 , lowerCamelCase : List[str]=1 , lowerCamelCase : Union[str, Any]=2 , lowerCamelCase : Dict=5 , lowerCamelCase : List[Any]=2 , lowerCamelCase : Tuple=0.25 , **lowerCamelCase : List[str] , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__lowercase = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase = backbone_config.get("model_type" )
__lowercase = CONFIG_MAPPING[backbone_model_type]
__lowercase = config_class.from_dict(lowerCamelCase )
__lowercase = use_timm_backbone
__lowercase = backbone_config
__lowercase = num_channels
__lowercase = num_queries
__lowercase = d_model
__lowercase = encoder_ffn_dim
__lowercase = encoder_layers
__lowercase = encoder_attention_heads
__lowercase = decoder_ffn_dim
__lowercase = decoder_layers
__lowercase = decoder_attention_heads
__lowercase = dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = activation_function
__lowercase = init_std
__lowercase = init_xavier_std
__lowercase = encoder_layerdrop
__lowercase = decoder_layerdrop
__lowercase = encoder_layers
__lowercase = auxiliary_loss
__lowercase = position_embedding_type
__lowercase = backbone
__lowercase = use_pretrained_backbone
__lowercase = dilation
# Hungarian matcher
__lowercase = class_cost
__lowercase = bbox_cost
__lowercase = giou_cost
# Loss coefficients
__lowercase = mask_loss_coefficient
__lowercase = dice_loss_coefficient
__lowercase = cls_loss_coefficient
__lowercase = bbox_loss_coefficient
__lowercase = giou_loss_coefficient
__lowercase = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase , **lowerCamelCase )
@property
def _snake_case ( self : Tuple ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def _snake_case ( self : str ):
'''simple docstring'''
return self.d_model
def _snake_case ( self : int ):
'''simple docstring'''
__lowercase = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
__lowercase = self.backbone_config.to_dict()
__lowercase = self.__class__.model_type
return output
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Any = version.parse("""1.11""" )
@property
def _snake_case ( self : Tuple ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _snake_case ( self : Any ):
'''simple docstring'''
return 1e-5
@property
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
return 12
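# For reference, the upstream class this mirrors is `ConditionalDetrConfig`
# in `transformers`; the attribute_map above lets the generic attribute name
# resolve to the encoder field (assuming the package is installed):
from transformers import ConditionalDetrConfig

cfg = ConditionalDetrConfig()
print(cfg.num_attention_heads == cfg.encoder_attention_heads)  # True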
| 655 | 1 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def snake_case_ ( ):
__lowercase = ArgumentParser("Transformers CLI tool" , usage="transformers-cli <command> [<args>]" )
__lowercase = parser.add_subparsers(help="transformers-cli command helpers" )
# Register commands
ConvertCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
DownloadCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
EnvironmentCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
RunCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
ServeCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
UserCommands.register_subcommand(_SCREAMING_SNAKE_CASE )
AddNewModelCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
AddNewModelLikeCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
LfsCommands.register_subcommand(_SCREAMING_SNAKE_CASE )
PTtoTFCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
# Let's go
__lowercase = parser.parse_args()
if not hasattr(_SCREAMING_SNAKE_CASE , "func" ):
parser.print_help()
exit(1 )
# Run
__lowercase = args.func(_SCREAMING_SNAKE_CASE )
service.run()
if __name__ == "__main__":
main()
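# The registration pattern above in miniature: each command attaches a
# subparser and stores a callable under `func`, which the entry point then
# dispatches to. A generic sketch, not the real transformers CLI:
from argparse import ArgumentParser

parser = ArgumentParser("demo-cli")
subparsers = parser.add_subparsers(help="demo command helpers")
run_parser = subparsers.add_parser("run")
run_parser.set_defaults(func=lambda args: print("running"))
args = parser.parse_args(["run"])
args.func(args)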
| 655 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case__ : Any = logging.get_logger(__name__)
class _A ( _lowercase , _lowercase ):
'''simple docstring'''
_snake_case : Dict = """maskformer-swin"""
_snake_case : List[str] = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : List[str] , lowerCamelCase : Any=224 , lowerCamelCase : Optional[Any]=4 , lowerCamelCase : Dict=3 , lowerCamelCase : Tuple=96 , lowerCamelCase : str=[2, 2, 6, 2] , lowerCamelCase : Dict=[3, 6, 12, 24] , lowerCamelCase : Optional[Any]=7 , lowerCamelCase : Any=4.0 , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : List[str]=0.0 , lowerCamelCase : Optional[int]=0.0 , lowerCamelCase : List[str]=0.1 , lowerCamelCase : int="gelu" , lowerCamelCase : Optional[int]=False , lowerCamelCase : List[Any]=0.02 , lowerCamelCase : Tuple=1e-5 , lowerCamelCase : Dict=None , lowerCamelCase : Dict=None , **lowerCamelCase : int , ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = embed_dim
__lowercase = depths
__lowercase = len(lowerCamelCase )
__lowercase = num_heads
__lowercase = window_size
__lowercase = mlp_ratio
__lowercase = qkv_bias
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = drop_path_rate
__lowercase = hidden_act
__lowercase = use_absolute_embeddings
__lowercase = layer_norm_eps
__lowercase = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__lowercase = int(embed_dim * 2 ** (len(lowerCamelCase ) - 1) )
__lowercase = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase ) + 1 )]
__lowercase , __lowercase = get_aligned_output_features_output_indices(
out_features=lowerCamelCase , out_indices=lowerCamelCase , stage_names=self.stage_names )
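# The hidden_size set above doubles the channel dimension once per stage;
# with the defaults embed_dim=96 and four stages that gives 96 * 2**3 = 768:
embed_dim, num_stages = 96, 4
print(int(embed_dim * 2 ** (num_stages - 1)))  # 768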
| 655 | 1 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _A ( _lowercase ):
'''simple docstring'''
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(lowerCamelCase , "neck_hidden_sizes" ) )
self.parent.assertTrue(hasattr(lowerCamelCase , "num_attention_heads" ) )
class _A :
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : str=13 , lowerCamelCase : Union[str, Any]=32 , lowerCamelCase : Any=2 , lowerCamelCase : int=3 , lowerCamelCase : Optional[int]=640 , lowerCamelCase : Optional[Any]=4 , lowerCamelCase : List[str]="silu" , lowerCamelCase : Any=3 , lowerCamelCase : List[Any]=32 , lowerCamelCase : int=0.1 , lowerCamelCase : Dict=0.1 , lowerCamelCase : Optional[int]=0.1 , lowerCamelCase : List[Any]=0.02 , lowerCamelCase : int=True , lowerCamelCase : int=True , lowerCamelCase : int=10 , lowerCamelCase : str=None , ):
'''simple docstring'''
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = last_hidden_size
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = conv_kernel_size
__lowercase = output_stride
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = classifier_dropout_prob
__lowercase = use_labels
__lowercase = is_training
__lowercase = num_labels
__lowercase = initializer_range
__lowercase = scope
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.num_labels )
__lowercase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowercase = self.get_config()
return config, pixel_values, labels, pixel_labels
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : Tuple , lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__lowercase = MobileViTModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__lowercase = model(lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _snake_case ( self : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : Dict ):
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = MobileViTForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__lowercase = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : Dict , lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : Any ):
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = MobileViTForSemanticSegmentation(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__lowercase = model(lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__lowercase = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _A ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_snake_case : Optional[Any] = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
_snake_case : str = (
{
"""feature-extraction""": MobileViTModel,
"""image-classification""": MobileViTForImageClassification,
"""image-segmentation""": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_snake_case : Any = False
_snake_case : int = False
_snake_case : Tuple = False
_snake_case : str = False
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowercase = MobileViTModelTester(self )
__lowercase = MobileViTConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase )
def _snake_case ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViT does not use inputs_embeds" )
def _snake_case ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="MobileViT does not support input and output embeddings" )
def _snake_case ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason="MobileViT does not output attentions" )
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
pass
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowerCamelCase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
pass
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def _snake_case ( self : str ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : Any ):
__lowercase = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
__lowercase = outputs.hidden_states
__lowercase = 5
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__lowercase = 2
for i in range(len(lowerCamelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase )
@slow
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = MobileViTModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def snake_case_ ( ):
__lowercase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self : List[Any] ):
'''simple docstring'''
return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small" ) if is_vision_available() else None
@slow
def _snake_case ( self : Any ):
'''simple docstring'''
__lowercase = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small" ).to(lowerCamelCase )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__lowercase = model(**lowerCamelCase )
# verify the logits
__lowercase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
__lowercase = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 ) )
@slow
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
__lowercase = model.to(lowerCamelCase )
__lowercase = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
__lowercase = prepare_img()
__lowercase = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__lowercase = model(**lowerCamelCase )
__lowercase = outputs.logits
# verify the logits
__lowercase = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , lowerCamelCase )
__lowercase = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=lowerCamelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1e-4 ) )
@slow
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
__lowercase = model.to(lowerCamelCase )
__lowercase = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
__lowercase = prepare_img()
__lowercase = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__lowercase = model(**lowerCamelCase )
__lowercase = outputs.logits.detach().cpu()
__lowercase = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase , target_sizes=[(50, 60)] )
__lowercase = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , lowerCamelCase )
__lowercase = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase )
__lowercase = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , lowerCamelCase )
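# The hidden-state check in the tests above encodes MobileViT's schedule:
# five feature maps, each halving H and W, finishing at the configured
# output stride (the tester uses image_size=32, output_stride=32):
image_size, output_stride, divisor = 32, 32, 2
sizes = []
for _ in range(5):
    sizes.append(image_size // divisor)
    divisor *= 2
print(sizes)  # [16, 8, 4, 2, 1]
assert output_stride == divisor // 2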
| 655 |
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# bit count represents no. of bits in the gray code
if bit_count < 0:
        raise ValueError("The given input must be non-negative" )
# get the generated string sequence
__lowercase = gray_code_sequence_string(_SCREAMING_SNAKE_CASE )
#
# convert them to integers
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
__lowercase = int(sequence[i] , 2 )
return sequence
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__lowercase = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__lowercase = gray_code_sequence_string(bit_count - 1 )
__lowercase = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__lowercase = "0" + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__lowercase = "1" + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
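# Cross-check of the reflect-and-prefix construction above against the
# closed form gray(i) = i ^ (i >> 1):
bit_count = 3
closed_form = [i ^ (i >> 1) for i in range(1 << bit_count)]
print(closed_form)  # [0, 1, 3, 2, 6, 7, 5, 4]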
| 655 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _A ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowercase = 0
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowercase = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
def _snake_case ( self : List[Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase = Path(lowerCamelCase ) / "preprocessor_config.json"
__lowercase = Path(lowerCamelCase ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(lowerCamelCase , "w" ) , )
json.dump({"model_type": "clip"} , open(lowerCamelCase , "w" ) )
__lowercase = AutoImageProcessor.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
def _snake_case ( self : Tuple ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase = Path(lowerCamelCase ) / "preprocessor_config.json"
__lowercase = Path(lowerCamelCase ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(lowerCamelCase , "w" ) , )
json.dump({"model_type": "clip"} , open(lowerCamelCase , "w" ) )
__lowercase = AutoImageProcessor.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
def _snake_case ( self : Tuple ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase = CLIPConfig()
            # Create a dummy config file with image_processor_type
__lowercase = Path(lowerCamelCase ) / "preprocessor_config.json"
__lowercase = Path(lowerCamelCase ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(lowerCamelCase , "w" ) , )
json.dump({"model_type": "clip"} , open(lowerCamelCase , "w" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
__lowercase = AutoImageProcessor.from_pretrained(lowerCamelCase ).to_dict()
config_dict.pop("image_processor_type" )
__lowercase = CLIPImageProcessor(**lowerCamelCase )
# save in new folder
model_config.save_pretrained(lowerCamelCase )
config.save_pretrained(lowerCamelCase )
__lowercase = AutoImageProcessor.from_pretrained(lowerCamelCase )
# make sure private variable is not incorrectly saved
__lowercase = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
def _snake_case ( self : str ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase = Path(lowerCamelCase ) / "preprocessor_config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(lowerCamelCase , "w" ) , )
__lowercase = AutoImageProcessor.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
def _snake_case ( self : Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase , "clip-base is not a local folder and is not a valid model identifier" ):
__lowercase = AutoImageProcessor.from_pretrained("clip-base" )
def _snake_case ( self : List[Any] ):
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
__lowercase = AutoImageProcessor.from_pretrained(lowerCamelCase , revision="aaaaaa" )
def _snake_case ( self : Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
__lowercase = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" )
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase ):
__lowercase = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase ):
__lowercase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=lowerCamelCase )
__lowercase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=lowerCamelCase )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(lowerCamelCase )
__lowercase = AutoImageProcessor.from_pretrained(lowerCamelCase , trust_remote_code=lowerCamelCase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" )
def _snake_case ( self : List[Any] ):
'''simple docstring'''
try:
AutoConfig.register("custom" , lowerCamelCase )
AutoImageProcessor.register(lowerCamelCase , lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase ):
AutoImageProcessor.register(lowerCamelCase , lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase = Path(lowerCamelCase ) / "preprocessor_config.json"
__lowercase = Path(lowerCamelCase ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(lowerCamelCase , "w" ) , )
json.dump({"model_type": "clip"} , open(lowerCamelCase , "w" ) )
__lowercase = CustomImageProcessor.from_pretrained(lowerCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(lowerCamelCase )
__lowercase = AutoImageProcessor.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Union[str, Any] = True
try:
AutoConfig.register("custom" , lowerCamelCase )
AutoImageProcessor.register(lowerCamelCase , lowerCamelCase )
# If remote code is not set, the default is to use local
__lowercase = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
__lowercase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=lowerCamelCase )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
__lowercase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=lowerCamelCase )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(not hasattr(lowerCamelCase , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
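# Illustrative sketch (not part of the original tests): the register/save/reload
# round trip the two test methods above exercise, in one place. It relies on the
# `CustomConfig` / `CustomImageProcessor` fixtures imported at the top of this file.
def _demo_register_custom_image_processor(save_dir):
    AutoConfig.register("custom", CustomConfig)
    AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
    try:
        CustomImageProcessor().save_pretrained(save_dir)
        reloaded = AutoImageProcessor.from_pretrained(save_dir)
        assert isinstance(reloaded, CustomImageProcessor)
    finally:
        # keep the global mappings clean between runs, as the tests above do
        CONFIG_MAPPING._extra_content.pop("custom", None)
        IMAGE_PROCESSOR_MAPPING._extra_content.pop(CustomConfig, None)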
| 655 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ):
model.train()
__lowercase = model(_SCREAMING_SNAKE_CASE )
__lowercase = F.mse_loss(_SCREAMING_SNAKE_CASE , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(_SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
set_seed(4_2 )
__lowercase = RegressionModel()
__lowercase = deepcopy(_SCREAMING_SNAKE_CASE )
__lowercase = RegressionDataset(length=8_0 )
__lowercase = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=1_6 )
model.to(accelerator.device )
if sched:
__lowercase = AdamW(params=model.parameters() , lr=1E-3 )
__lowercase = AdamW(params=ddp_model.parameters() , lr=1E-3 )
__lowercase = LambdaLR(_SCREAMING_SNAKE_CASE , lr_lambda=lambda _SCREAMING_SNAKE_CASE : epoch**0.6_5 )
__lowercase = LambdaLR(_SCREAMING_SNAKE_CASE , lr_lambda=lambda _SCREAMING_SNAKE_CASE : epoch**0.6_5 )
# Make a copy of `model`
if sched:
__lowercase , __lowercase , __lowercase , __lowercase = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
__lowercase , __lowercase = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# Test when on a single CPU or GPU that the context manager does nothing
__lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE )
# Use a single batch
__lowercase , __lowercase = next(iter(_SCREAMING_SNAKE_CASE ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) )
__lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
# Sync grads
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )]
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# Test on distributed setup that context manager behaves properly
__lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE )
# Use a single batch
__lowercase , __lowercase = next(iter(_SCREAMING_SNAKE_CASE ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) )
__lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
# Sync grads
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )]
def snake_case_ ( _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ):
__lowercase = Accelerator(
split_batches=_SCREAMING_SNAKE_CASE , dispatch_batches=_SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE )
for iteration, batch in enumerate(_SCREAMING_SNAKE_CASE ):
__lowercase , __lowercase = batch.values()
# Gather the distributed inputs and targs for the base model
__lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) )
__lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(_SCREAMING_SNAKE_CASE ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )]
GradientState._reset_state()
def snake_case_ ( _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ):
__lowercase = Accelerator(
split_batches=_SCREAMING_SNAKE_CASE , dispatch_batches=_SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for iteration, batch in enumerate(_SCREAMING_SNAKE_CASE ):
__lowercase , __lowercase = batch.values()
# Gather the distributed inputs and targs for the base model
__lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) )
__lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_SCREAMING_SNAKE_CASE )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"""
__lowercase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_SCREAMING_SNAKE_CASE ))
if accelerator.num_processes > 1:
check_model_parameters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
GradientState._reset_state()
def snake_case_ ( ):
__lowercase = Accelerator()
__lowercase = RegressionDataset(length=8_0 )
__lowercase = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=1_6 )
__lowercase = RegressionDataset(length=9_6 )
__lowercase = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=1_6 )
__lowercase , __lowercase = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(_SCREAMING_SNAKE_CASE ):
assert id(accelerator.gradient_state.active_dataloader ) == id(_SCREAMING_SNAKE_CASE )
if iteration < len(_SCREAMING_SNAKE_CASE ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(_SCREAMING_SNAKE_CASE ):
assert id(accelerator.gradient_state.active_dataloader ) == id(_SCREAMING_SNAKE_CASE )
if batch_num < len(_SCREAMING_SNAKE_CASE ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def snake_case_ ( ):
__lowercase = Accelerator()
__lowercase = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(_SCREAMING_SNAKE_CASE )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(_SCREAMING_SNAKE_CASE )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation_with_opt_and_scheduler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# For xla_spawn (TPUs)
main()
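# Illustrative sketch (not part of the original script): the core pattern these
# tests validate. Under `accelerator.accumulate(model)`, gradients only
# synchronize every `gradient_accumulation_steps` batches, so the training loop
# can call `optimizer.step()` unconditionally.
def _demo_accumulate_loop():
    accelerator = Accelerator(gradient_accumulation_steps=2)
    model = RegressionModel()
    optimizer = AdamW(model.parameters(), lr=1e-3)
    dataloader = DataLoader(RegressionDataset(length=80), batch_size=16)
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    for batch in dataloader:
        with accelerator.accumulate(model):
            loss = F.mse_loss(model(batch["x"]), batch["y"])
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()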
if __name__ == "__main__":
main()
| 655 | 1 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
snake_case__ : List[Any] = logging.get_logger(__name__)
snake_case__ : Tuple = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
snake_case__ : Optional[int] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _A :
'''simple docstring'''
_snake_case : str = field(
default=_lowercase , metadata={"""help""": """Model type selected in the list: """ + """, """.join(_lowercase )} )
_snake_case : str = field(
default=_lowercase , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} )
_snake_case : int = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_snake_case : int = field(
default=128 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , )
_snake_case : int = field(
default=64 , metadata={
"""help""": (
"""The maximum number of tokens for the question. Questions longer than this will """
"""be truncated to this length."""
)
} , )
_snake_case : int = field(
default=30 , metadata={
"""help""": (
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
)
} , )
_snake_case : bool = field(
default=_lowercase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
_snake_case : bool = field(
default=_lowercase , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} )
_snake_case : float = field(
default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
_snake_case : int = field(
default=20 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
_snake_case : int = field(
default=0 , metadata={
"""help""": (
"""language id of input for language-specific xlm models (see"""
""" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"""
)
} , )
_snake_case : int = field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""} )
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Union[str, Any] = """train"""
_snake_case : str = """dev"""
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : SquadDataTrainingArguments
_snake_case : List[SquadFeatures]
_snake_case : Split
_snake_case : bool
def __init__( self : Optional[Any] , lowerCamelCase : SquadDataTrainingArguments , lowerCamelCase : PreTrainedTokenizer , lowerCamelCase : Optional[int] = None , lowerCamelCase : Union[str, Split] = Split.train , lowerCamelCase : Optional[bool] = False , lowerCamelCase : Optional[str] = None , lowerCamelCase : Optional[str] = "pt" , ):
'''simple docstring'''
__lowercase = args
__lowercase = is_language_sensitive
        __lowercase = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(lowerCamelCase , lowerCamelCase ):
try:
__lowercase = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
__lowercase = mode
# Load data features from cache or dataset file
__lowercase = "v2" if args.version_2_with_negative else "v1"
__lowercase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}""" , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowercase = cached_features_file + ".lock"
with FileLock(lowerCamelCase ):
if os.path.exists(lowerCamelCase ) and not args.overwrite_cache:
__lowercase = time.time()
__lowercase = torch.load(lowerCamelCase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
__lowercase = self.old_features["features"]
__lowercase = self.old_features.get("dataset" , lowerCamelCase )
__lowercase = self.old_features.get("examples" , lowerCamelCase )
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f"""Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"""
" future run" )
else:
if mode == Split.dev:
__lowercase = self.processor.get_dev_examples(args.data_dir )
else:
__lowercase = self.processor.get_train_examples(args.data_dir )
__lowercase , __lowercase = squad_convert_examples_to_features(
examples=self.examples , tokenizer=lowerCamelCase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=lowerCamelCase , )
__lowercase = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , lowerCamelCase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self : Optional[Any] ):
'''simple docstring'''
return len(self.features )
def __getitem__( self : Any , lowerCamelCase : Tuple ):
'''simple docstring'''
__lowercase = self.features[i]
__lowercase = torch.tensor(feature.input_ids , dtype=torch.long )
__lowercase = torch.tensor(feature.attention_mask , dtype=torch.long )
__lowercase = torch.tensor(feature.token_type_ids , dtype=torch.long )
__lowercase = torch.tensor(feature.cls_index , dtype=torch.long )
__lowercase = torch.tensor(feature.p_mask , dtype=torch.float )
__lowercase = torch.tensor(feature.is_impossible , dtype=torch.float )
__lowercase = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
__lowercase = torch.tensor(feature.start_position , dtype=torch.long )
__lowercase = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
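# Illustrative sketch (not part of the original module): the cache-under-lock
# pattern used in `__init__` above, reduced to its essence. The first process
# builds and saves the features; every other process blocks on the lock and
# then reads the cache. Names here are illustrative.
def _demo_cached_build(cache_path, build_fn):
    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path):
            return torch.load(cache_path)
        features = build_fn()
        torch.save(features, cache_path)
        return features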
| 655 |
from ....utils import logging
snake_case__ : List[Any] = logging.get_logger(__name__)
class _A ( _lowercase ):
'''simple docstring'''
def __init__( self : List[str] , lowerCamelCase : Any , lowerCamelCase : Dict=None , lowerCamelCase : Dict=2_048 ):
'''simple docstring'''
__lowercase = config.__dict__
__lowercase = modal_hidden_size
if num_labels:
__lowercase = num_labels
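# Illustrative usage (not part of the original module), written against the
# upstream name `MMBTConfig` for the class above: it copies an existing
# transformer config's attributes and bolts on a modal hidden size.
#
#     from transformers import BertConfig
#     config = MMBTConfig(BertConfig(), num_labels=2, modal_hidden_size=2_048)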
| 655 | 1 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class _A ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[str] , lowerCamelCase : List[str] , lowerCamelCase : int=13 , lowerCamelCase : str=7 , lowerCamelCase : Optional[Any]=True , lowerCamelCase : List[Any]=True , lowerCamelCase : str=True , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : Tuple=99 , lowerCamelCase : Tuple=32 , lowerCamelCase : Optional[int]=5 , lowerCamelCase : Optional[int]=4 , lowerCamelCase : Dict=37 , lowerCamelCase : List[str]="gelu" , lowerCamelCase : str=0.1 , lowerCamelCase : Optional[int]=0.1 , lowerCamelCase : Optional[Any]=512 , lowerCamelCase : Optional[int]=16 , lowerCamelCase : List[str]=2 , lowerCamelCase : int=0.02 , lowerCamelCase : Optional[int]=4 , ):
'''simple docstring'''
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_attention_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_choices
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_attention_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class _A ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_snake_case : List[str] = True
_snake_case : str = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowercase = FlaxRoFormerModelTester(self )
@slow
def _snake_case ( self : str ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowercase = model_class_name.from_pretrained("junnyu/roformer_chinese_small" , from_pt=lowerCamelCase )
__lowercase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase )
@require_flax
class _A ( unittest.TestCase ):
'''simple docstring'''
@slow
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
__lowercase = jnp.array([[0, 1, 2, 3, 4, 5]] )
__lowercase = model(lowerCamelCase )[0]
__lowercase = 50_000
__lowercase = (1, 6, vocab_size)
self.assertEqual(output.shape , lowerCamelCase )
__lowercase = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , lowerCamelCase , atol=1e-4 ) )
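# Illustrative sketch (not part of the original tests; requires a flax install):
# what the slow masked-LM integration test above checks, as a standalone function.
def _demo_flax_roformer_mlm():
    model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
    logits = model(jnp.array([[0, 1, 2, 3, 4, 5]]))[0]
    assert logits.shape == (1, 6, 50_000)  # (batch, seq_len, vocab_size)
    return logits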
| 655 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _A ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_snake_case : Dict = StableUnCLIPImgaImgPipeline
_snake_case : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
_snake_case : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_snake_case : int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_snake_case : int = frozenset([] )
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowercase = 32
__lowercase = embedder_hidden_size
# image encoding components
__lowercase = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
__lowercase = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCamelCase , projection_dim=lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
__lowercase = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
__lowercase = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
__lowercase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__lowercase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase , layers_per_block=1 , upcast_attention=lowerCamelCase , use_linear_projection=lowerCamelCase , )
torch.manual_seed(0 )
__lowercase = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
__lowercase = AutoencoderKL()
__lowercase = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def _snake_case ( self : List[Any] , lowerCamelCase : str , lowerCamelCase : Any=0 , lowerCamelCase : Union[str, Any]=True ):
'''simple docstring'''
if str(lowerCamelCase ).startswith("mps" ):
__lowercase = torch.manual_seed(lowerCamelCase )
else:
__lowercase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
if pil_image:
__lowercase = input_image * 0.5 + 0.5
__lowercase = input_image.clamp(0 , 1 )
__lowercase = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__lowercase = DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = "cpu" # ensure determinism for the device-dependent torch.Generator
__lowercase = self.get_dummy_components()
__lowercase = StableUnCLIPImgaImgPipeline(**lowerCamelCase )
__lowercase = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
__lowercase = self.get_dummy_inputs(lowerCamelCase )
inputs.update({"image_embeds": None} )
__lowercase = sd_pipe(**lowerCamelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowercase = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _snake_case ( self : Dict ):
'''simple docstring'''
__lowercase = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _snake_case ( self : str ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase )
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self : Any ):
'''simple docstring'''
__lowercase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__lowercase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
__lowercase = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowercase = torch.Generator(device="cpu" ).manual_seed(0 )
        __lowercase = pipe(lowerCamelCase , "anime turtle" , generator=lowerCamelCase , output_type="np" )
__lowercase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__lowercase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
__lowercase = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowercase = torch.Generator(device="cpu" ).manual_seed(0 )
        __lowercase = pipe(lowerCamelCase , "anime turtle" , generator=lowerCamelCase , output_type="np" )
__lowercase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowercase = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
__lowercase = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowercase = pipe(
lowerCamelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
__lowercase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
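# Illustrative sketch (not part of the original tests): the memory-saving recipe
# the integration tests above rely on. Attention slicing trades speed for lower
# peak memory inside attention blocks; sequential CPU offload keeps only the
# active submodule on the GPU. The pipeline class name follows this file's
# import (upstream it is `StableUnCLIPImg2ImgPipeline`), and `torch.float16`
# stands in for the file's mangled `torch.floataa` alias.
def _demo_low_memory_pipeline():
    pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
        "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
    )
    pipe.enable_attention_slicing()
    pipe.enable_sequential_cpu_offload()  # no explicit .to("cuda") needed after this
    return pipe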
| 655 | 1 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
snake_case__ : Any = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
class _A ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_snake_case : List[str] = BartphoTokenizer
_snake_case : Tuple = False
_snake_case : int = True
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
__lowercase = ["▁This", "▁is", "▁a", "▁t", "est"]
__lowercase = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
__lowercase = {"unk_token": "<unk>"}
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"] )
with open(self.monolingual_vocab_file , "w" , encoding="utf-8" ) as fp:
for token in vocab_tokens:
fp.write(f"""{token} {vocab_tokens[token]}\n""" )
__lowercase = BartphoTokenizer(lowerCamelCase , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self : Tuple , **lowerCamelCase : List[Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase )
def _snake_case ( self : Optional[Any] , lowerCamelCase : str ):
'''simple docstring'''
__lowercase = "This is a là test"
__lowercase = "This is a<unk><unk> test"
return input_text, output_text
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowercase = BartphoTokenizer(lowerCamelCase , self.monolingual_vocab_file , **self.special_tokens_map )
__lowercase = "This is a là test"
__lowercase = "▁This ▁is ▁a ▁l à ▁t est".split()
__lowercase = tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
__lowercase = tokens + [tokenizer.unk_token]
__lowercase = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) , lowerCamelCase )
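# Illustrative sketch (not part of the original tests): what the full-tokenizer
# test above asserts, as a standalone check. Pieces outside the five-token
# monolingual vocab collapse to the <unk> id (3).
def _demo_bartpho_unk(tokenizer):
    ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("This is a là test"))
    assert ids == [4, 5, 6, 3, 3, 7, 8]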
| 655 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _A ( _lowercase , _lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self : Optional[Any] , *,
lowerCamelCase : int = 4 , lowerCamelCase : int = 768 , lowerCamelCase : int , lowerCamelCase : Optional[int] , ):
'''simple docstring'''
super().__init__()
__lowercase = nn.Parameter(torch.zeros(lowerCamelCase ) )
# parameters for additional clip time embeddings
__lowercase = nn.Linear(lowerCamelCase , lowerCamelCase )
__lowercase = nn.Linear(lowerCamelCase , lowerCamelCase )
# parameters for encoder hidden states
__lowercase = clip_extra_context_tokens
__lowercase = nn.Linear(
lowerCamelCase , self.clip_extra_context_tokens * cross_attention_dim )
__lowercase = nn.Linear(lowerCamelCase , lowerCamelCase )
__lowercase = nn.LayerNorm(lowerCamelCase )
def _snake_case ( self : Union[str, Any] , *, lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple ):
'''simple docstring'''
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
__lowercase = image_embeddings.shape[0]
__lowercase = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
__lowercase = classifier_free_guidance_embeddings.expand(
lowerCamelCase , -1 )
__lowercase = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
__lowercase = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
__lowercase = self.embedding_proj(lowerCamelCase )
__lowercase = self.clip_image_embeddings_project_to_time_embeddings(lowerCamelCase )
__lowercase = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
__lowercase = self.clip_extra_context_tokens_proj(lowerCamelCase )
__lowercase = clip_extra_context_tokens.reshape(lowerCamelCase , -1 , self.clip_extra_context_tokens )
__lowercase = clip_extra_context_tokens.permute(0 , 2 , 1 )
__lowercase = self.encoder_hidden_states_proj(lowerCamelCase )
__lowercase = self.text_encoder_hidden_states_norm(lowerCamelCase )
__lowercase = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
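# Illustrative shape trace (not part of the original module). With batch_size=2,
# embedding_dim=768, cross_attention_dim=768 and clip_extra_context_tokens=4
# (the defaults above), the forward pass produces:
#   image_embeddings                      (2, 768)
#   additive_clip_time_embeddings         (2, time_embed_dim)
#   clip_extra_context_tokens             (2, 4, 768)           after reshape + permute
#   text_encoder_hidden_states            (2, 4 + seq_len, 768)  after the final concat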
| 655 | 1 |
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
    # bit_count represents the number of bits in each gray code
    if bit_count < 0:
        raise ValueError("The given input must be non-negative" )
# get the generated string sequence
__lowercase = gray_code_sequence_string(_SCREAMING_SNAKE_CASE )
    # convert the generated strings to integers
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
__lowercase = int(sequence[i] , 2 )
return sequence
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__lowercase = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__lowercase = gray_code_sequence_string(bit_count - 1 )
__lowercase = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__lowercase = "0" + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__lowercase = "1" + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 655 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
snake_case__ : Union[str, Any] = TypeVar("""T""")
snake_case__ : Optional[int] = TypeVar("""U""")
class _A ( Generic[T, U] ):
'''simple docstring'''
def __init__( self : Optional[int] , lowerCamelCase : T | None , lowerCamelCase : U | None ):
'''simple docstring'''
__lowercase = key
__lowercase = val
__lowercase = None
__lowercase = None
def __repr__( self : Any ):
'''simple docstring'''
return (
f"""Node: key: {self.key}, val: {self.val}, """
f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class _A ( Generic[T, U] ):
'''simple docstring'''
def __init__( self : Dict ):
'''simple docstring'''
__lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase )
__lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase )
__lowercase , __lowercase = self.rear, self.head
def __repr__( self : Optional[Any] ):
'''simple docstring'''
__lowercase = ["DoubleLinkedList"]
__lowercase = self.head
while node.next is not None:
rep.append(str(lowerCamelCase ) )
__lowercase = node.next
rep.append(str(self.rear ) )
return ",\n ".join(lowerCamelCase )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : DoubleLinkedListNode[T, U] ):
'''simple docstring'''
__lowercase = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
__lowercase = node
__lowercase = previous
__lowercase = node
__lowercase = self.rear
def _snake_case ( self : Optional[int] , lowerCamelCase : DoubleLinkedListNode[T, U] ):
'''simple docstring'''
if node.prev is None or node.next is None:
return None
__lowercase = node.next
__lowercase = node.prev
__lowercase = None
__lowercase = None
return node
class _A ( Generic[T, U] ):
'''simple docstring'''
_snake_case : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self : List[Any] , lowerCamelCase : int ):
'''simple docstring'''
__lowercase = DoubleLinkedList()
__lowercase = capacity
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = {}
def __repr__( self : Optional[Any] ):
'''simple docstring'''
return (
f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
f"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self : Dict , lowerCamelCase : T ):
'''simple docstring'''
return key in self.cache
def _snake_case ( self : List[Any] , lowerCamelCase : T ):
'''simple docstring'''
if key in self.cache:
self.hits += 1
__lowercase = self.cache[key]
__lowercase = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(lowerCamelCase )
return node.val
self.miss += 1
return None
def _snake_case ( self : Union[str, Any] , lowerCamelCase : T , lowerCamelCase : U ):
'''simple docstring'''
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
__lowercase = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(lowerCamelCase ) is not None
                ) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
__lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
__lowercase = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
__lowercase = value
self.list.add(lowerCamelCase )
@classmethod
def _snake_case ( cls : Union[str, Any] , lowerCamelCase : int = 128 ):
'''simple docstring'''
def cache_decorator_inner(lowerCamelCase : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*lowerCamelCase : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
__lowercase = LRUCache(lowerCamelCase )
__lowercase = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
__lowercase = func(*lowerCamelCase )
cls.decorator_function_to_instance_map[func].put(args[0] , lowerCamelCase )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(lowerCamelCase , "cache_info" , lowerCamelCase ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
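# Illustrative usage (not part of the original module), written against the
# upstream names `LRUCache.decorator` / `cache_info` that this copy renames:
# each distinct argument is computed once, repeats are served from the cache.
#
#     @LRUCache.decorator(100)
#     def square(num: int) -> int:
#         return num * num
#
#     square(2); square(2); square(3)
#     square.cache_info()  # CacheInfo(hits=1, misses=2, capacity=100, current size=2)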
if __name__ == "__main__":
import doctest
doctest.testmod()
| 655 | 1 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class _A :
'''simple docstring'''
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
__lowercase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
__lowercase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _snake_case ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
__lowercase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
__lowercase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
__lowercase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = inputs["prompt"]
__lowercase = inputs["generator"]
__lowercase = inputs["num_inference_steps"]
__lowercase = inputs["output_type"]
if "image" in inputs:
__lowercase = inputs["image"]
else:
__lowercase = None
if "mask_image" in inputs:
__lowercase = inputs["mask_image"]
else:
__lowercase = None
if "original_image" in inputs:
__lowercase = inputs["original_image"]
else:
__lowercase = None
__lowercase , __lowercase = pipe.encode_prompt(lowerCamelCase )
# inputs with prompt converted to embeddings
__lowercase = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
__lowercase = image
if mask_image is not None:
__lowercase = mask_image
if original_image is not None:
__lowercase = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(lowerCamelCase , lowerCamelCase , lowerCamelCase )
__lowercase = pipe(**lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCamelCase )
__lowercase = self.pipeline_class.from_pretrained(lowerCamelCase )
pipe_loaded.to(lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=lowerCamelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowerCamelCase , lowerCamelCase ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , )
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = inputs["generator"]
__lowercase = inputs["num_inference_steps"]
__lowercase = inputs["output_type"]
# inputs with prompt converted to embeddings
__lowercase = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
__lowercase = image
if mask_image is not None:
__lowercase = mask_image
if original_image is not None:
__lowercase = original_image
__lowercase = pipe_loaded(**lowerCamelCase )[0]
__lowercase = np.abs(to_np(lowerCamelCase ) - to_np(lowerCamelCase ) ).max()
self.assertLess(lowerCamelCase , 1e-4 )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = pipe(**lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCamelCase )
__lowercase = self.pipeline_class.from_pretrained(lowerCamelCase )
pipe_loaded.to(lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=lowerCamelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = pipe_loaded(**lowerCamelCase )[0]
__lowercase = np.abs(to_np(lowerCamelCase ) - to_np(lowerCamelCase ) ).max()
self.assertLess(lowerCamelCase , 1e-4 )
| 655 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
snake_case__ : Optional[Any] = logging.getLogger()
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = "\n".join(_SCREAMING_SNAKE_CASE )
Path(_SCREAMING_SNAKE_CASE ).open("w" ).writelines(_SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = """patrickvonplaten/t5-tiny-random"""
snake_case__ : int = """sshleifer/bart-tiny-random"""
snake_case__ : Union[str, Any] = """sshleifer/tiny-mbart"""
snake_case__ : List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class _A ( _lowercase ):
'''simple docstring'''
def _snake_case ( self : str , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowercase = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
__lowercase = input_file_name.parent / "utest_output.txt"
assert not output_file_name.exists()
__lowercase = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
_dump_articles(lowerCamelCase , lowerCamelCase )
__lowercase = str(Path(self.get_auto_remove_tmp_dir() ) / "scores.json" )
__lowercase = "translation_en_to_de" if model == T5_TINY else "summarization"
__lowercase = f"""
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
""".split()
with patch.object(lowerCamelCase , "argv" , lowerCamelCase ):
run_generate()
assert Path(lowerCamelCase ).exists()
# os.remove(Path(output_file_name))
def _snake_case ( self : Dict ):
'''simple docstring'''
self.run_eval_tester(lowerCamelCase )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def _snake_case ( self : Optional[Any] , lowerCamelCase : str ):
'''simple docstring'''
self.run_eval_tester(lowerCamelCase )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def _snake_case ( self : Optional[Any] , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowercase = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
__lowercase = input_file_name.parent / "utest_output.txt"
assert not output_file_name.exists()
__lowercase = {
"en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
"de": [
"Maschinelles Lernen ist großartig, oder?",
"Ich esse gerne Bananen",
"Morgen ist wieder ein toller Tag!",
],
}
__lowercase = Path(self.get_auto_remove_tmp_dir() )
__lowercase = str(tmp_dir / "scores.json" )
__lowercase = str(tmp_dir / "val.target" )
_dump_articles(lowerCamelCase , text["en"] )
_dump_articles(lowerCamelCase , text["de"] )
__lowercase = "translation_en_to_de" if model == T5_TINY else "summarization"
__lowercase = f"""
run_eval_search.py
{model}
{str(lowerCamelCase )}
{str(lowerCamelCase )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
""".split()
testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"] )
with patch.object(lowerCamelCase , "argv" , lowerCamelCase ):
with CaptureStdout() as cs:
run_search()
__lowercase = [" num_beams | length_penalty", model, "Best score args"]
__lowercase = ["Info"]
if "translation" in task:
expected_strings.append("bleu" )
else:
expected_strings.extend(lowerCamelCase )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(lowerCamelCase ).exists()
os.remove(Path(lowerCamelCase ) )
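# A note on the grid above (a sketch of the upstream run_eval_search behaviour, not verified here):
# the --search string "num_beams=1:2 length_penalty=0.9:1.0" is expanded into the cartesian
# product of the colon-separated values, i.e. four runs: (num_beams, length_penalty)
# in {1, 2} x {0.9, 1.0}.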
| 655 | 1 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests often fail with OOM errors on GPU.
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer
# needed, but it will be slower, as stated here: https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
snake_case__ : Union[str, Any] = """platform"""
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ):
if attention_mask is None:
__lowercase = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
__lowercase = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
__lowercase = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__lowercase = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__lowercase = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class _A :
'''simple docstring'''
def __init__( self : Dict , lowerCamelCase : Any , lowerCamelCase : List[Any]=13 , lowerCamelCase : Tuple=7 , lowerCamelCase : List[str]=True , lowerCamelCase : Optional[Any]=False , lowerCamelCase : Optional[int]=99 , lowerCamelCase : Any=16 , lowerCamelCase : Optional[Any]=2 , lowerCamelCase : Optional[int]=4 , lowerCamelCase : Any=4 , lowerCamelCase : Optional[Any]="gelu" , lowerCamelCase : Tuple=0.1 , lowerCamelCase : Dict=0.1 , lowerCamelCase : Optional[int]=32 , lowerCamelCase : Tuple=2 , lowerCamelCase : List[Any]=1 , lowerCamelCase : Tuple=0 , lowerCamelCase : Optional[Any]=0.02 , ):
'''simple docstring'''
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = eos_token_id
__lowercase = pad_token_id
__lowercase = bos_token_id
__lowercase = initializer_range
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowercase = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
__lowercase = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
__lowercase = shift_tokens_right(lowerCamelCase , 1 , 2 )
__lowercase = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowerCamelCase , )
__lowercase = prepare_blenderbot_inputs_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return config, inputs_dict
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowercase , __lowercase = self.prepare_config_and_inputs()
return config, inputs_dict
def _snake_case ( self : int , lowerCamelCase : str , lowerCamelCase : Optional[int] , lowerCamelCase : Dict ):
'''simple docstring'''
__lowercase = 20
__lowercase = model_class_name(lowerCamelCase )
__lowercase = model.encode(inputs_dict["input_ids"] )
__lowercase , __lowercase = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
__lowercase = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase , lowerCamelCase )
__lowercase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
__lowercase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__lowercase = model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase , decoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , decoder_position_ids=lowerCamelCase , )
__lowercase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
__lowercase = model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase , decoder_attention_mask=lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCamelCase , )
__lowercase = model.decode(lowerCamelCase , lowerCamelCase )
__lowercase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
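# Sketch of what the check above exercises (assuming the upstream Flax generation API):
# init_cache pre-allocates key/value tensors for max_decoder_length positions, the prefix
# decoder_input_ids[:, :-1] is decoded in one shot to warm the cache, the final token is
# decoded with past_key_values, and the result must match a full no-cache decode within 1e-3.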
def _snake_case ( self : Any , lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : str ):
'''simple docstring'''
__lowercase = 20
__lowercase = model_class_name(lowerCamelCase )
__lowercase = model.encode(inputs_dict["input_ids"] )
__lowercase , __lowercase = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
__lowercase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__lowercase = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase , lowerCamelCase )
__lowercase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__lowercase = model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase , decoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , decoder_position_ids=lowerCamelCase , )
__lowercase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
__lowercase = model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCamelCase , decoder_position_ids=lowerCamelCase , )
__lowercase = model.decode(lowerCamelCase , lowerCamelCase , decoder_attention_mask=lowerCamelCase )
__lowercase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class _A ( unittest.TestCase ):
'''simple docstring'''
_snake_case : Optional[Any] = 99
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowercase = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
__lowercase = input_ids.shape[0]
__lowercase = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase , __lowercase , __lowercase = self._get_config_and_data()
__lowercase = FlaxBlenderbotSmallForConditionalGeneration(lowerCamelCase )
__lowercase = lm_model(input_ids=lowerCamelCase )
__lowercase = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape , lowerCamelCase )
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowercase = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
__lowercase = FlaxBlenderbotSmallForConditionalGeneration(lowerCamelCase )
__lowercase = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
__lowercase = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
__lowercase = lm_model(input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase )
__lowercase = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape , lowerCamelCase )
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
__lowercase = shift_tokens_right(lowerCamelCase , 1 , 2 )
__lowercase = np.equal(lowerCamelCase , 1 ).astype(np.floataa ).sum()
__lowercase = np.equal(lowerCamelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(lowerCamelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
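# Worked example for the assertions above: shift_tokens_right(input_ids, 1, 2) shifts each
# row one position to the right, dropping the last token, and writes the decoder start
# token (2) at column 0, so [71, 82, 18, 33, 2, 1, 1] becomes [2, 71, 82, 18, 33, 2, 1] --
# same shape, one fewer pad token (1), and every row starting with 2.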
@require_flax
class _A ( _lowercase , unittest.TestCase , _lowercase ):
'''simple docstring'''
_snake_case : Union[str, Any] = True
_snake_case : Tuple = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
_snake_case : Tuple = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = FlaxBlenderbotSmallModelTester(self )
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowercase = self._prepare_for_class(lowerCamelCase , lowerCamelCase )
__lowercase = model_class(lowerCamelCase )
@jax.jit
def encode_jitted(lowerCamelCase : Optional[int] , lowerCamelCase : List[Any]=None , **lowerCamelCase : Optional[Any] ):
return model.encode(input_ids=lowerCamelCase , attention_mask=lowerCamelCase )
with self.subTest("JIT Enabled" ):
__lowercase = encode_jitted(**lowerCamelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__lowercase = encode_jitted(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) )
for jitted_output, output in zip(lowerCamelCase , lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowercase = model_class(lowerCamelCase )
__lowercase = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
__lowercase = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCamelCase : int , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple ):
return model.decode(
decoder_input_ids=lowerCamelCase , decoder_attention_mask=lowerCamelCase , encoder_outputs=lowerCamelCase , )
with self.subTest("JIT Enabled" ):
__lowercase = decode_jitted(**lowerCamelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__lowercase = decode_jitted(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) )
for jitted_output, output in zip(lowerCamelCase , lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _snake_case ( self : Any ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowercase = model_class_name.from_pretrained("facebook/blenderbot_small-90M" )
# The Flax Blenderbot Small models expect an eos token to be present in input_ids
__lowercase = np.ones((1, 1) ) * model.config.eos_token_id
__lowercase = model(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
| 655 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class _A :
'''simple docstring'''
_snake_case : int
_snake_case : TreeNode | None = None
_snake_case : TreeNode | None = None
snake_case__ : Dict = namedtuple("""CoinsDistribResult""", """moves excess""")
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
if root is None:
return 0
# Validation
def count_nodes(_SCREAMING_SNAKE_CASE ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(_SCREAMING_SNAKE_CASE ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(_SCREAMING_SNAKE_CASE ) != count_coins(_SCREAMING_SNAKE_CASE ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(_SCREAMING_SNAKE_CASE ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
__lowercase , __lowercase = get_distrib(node.left )
__lowercase , __lowercase = get_distrib(node.right )
__lowercase = 1 - left_distrib_excess
__lowercase = 1 - right_distrib_excess
__lowercase = (
left_distrib_moves
+ right_distrib_moves
+ abs(_SCREAMING_SNAKE_CASE )
+ abs(_SCREAMING_SNAKE_CASE )
)
__lowercase = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return get_distrib(_SCREAMING_SNAKE_CASE )[0]
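# Hedged usage sketch (upstream names assumed: the dataclass above is TreeNode(data, left, right)
# and the entry point is distribute_coins). For root = TreeNode(3, TreeNode(0), TreeNode(0))
# -- three nodes, three coins, all on the root -- the minimum number of moves is 2:
# one coin travels along each edge to a leaf.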
if __name__ == "__main__":
import doctest
doctest.testmod()
| 655 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
snake_case__ : Optional[Any] = logging.get_logger(__name__)
snake_case__ : int = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : str = """van"""
def __init__( self : Optional[Any] , lowerCamelCase : Tuple=224 , lowerCamelCase : str=3 , lowerCamelCase : int=[7, 3, 3, 3] , lowerCamelCase : str=[4, 2, 2, 2] , lowerCamelCase : List[Any]=[64, 128, 320, 512] , lowerCamelCase : int=[3, 3, 12, 3] , lowerCamelCase : str=[8, 8, 4, 4] , lowerCamelCase : Optional[Any]="gelu" , lowerCamelCase : List[Any]=0.02 , lowerCamelCase : int=1e-6 , lowerCamelCase : int=1e-2 , lowerCamelCase : Any=0.0 , lowerCamelCase : Optional[int]=0.0 , **lowerCamelCase : int , ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
__lowercase = image_size
__lowercase = num_channels
__lowercase = patch_sizes
__lowercase = strides
__lowercase = hidden_sizes
__lowercase = depths
__lowercase = mlp_ratios
__lowercase = hidden_act
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = layer_scale_init_value
__lowercase = drop_path_rate
__lowercase = dropout_rate
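# Hedged usage sketch (the class above mirrors transformers' VanConfig; names assumed):
# config = VanConfig(hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3])
# leaves every other default above intact, e.g. config.num_channels == 3 and
# config.hidden_act == "gelu".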
| 655 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = SwinvaConfig()
__lowercase = swinva_name.split("_" )
__lowercase = name_split[1]
if "to" in name_split[3]:
__lowercase = int(name_split[3][-3:] )
else:
__lowercase = int(name_split[3] )
if "to" in name_split[2]:
__lowercase = int(name_split[2][-2:] )
else:
__lowercase = int(name_split[2][6:] )
if model_size == "tiny":
__lowercase = 9_6
__lowercase = (2, 2, 6, 2)
__lowercase = (3, 6, 1_2, 2_4)
elif model_size == "small":
__lowercase = 9_6
__lowercase = (2, 2, 1_8, 2)
__lowercase = (3, 6, 1_2, 2_4)
elif model_size == "base":
__lowercase = 1_2_8
__lowercase = (2, 2, 1_8, 2)
__lowercase = (4, 8, 1_6, 3_2)
else:
__lowercase = 1_9_2
__lowercase = (2, 2, 1_8, 2)
__lowercase = (6, 1_2, 2_4, 4_8)
if "to" in swinva_name:
__lowercase = (1_2, 1_2, 1_2, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
__lowercase = 2_1_8_4_1
__lowercase = "huggingface/label-files"
__lowercase = "imagenet-22k-id2label.json"
__lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
__lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
else:
__lowercase = 1_0_0_0
__lowercase = "huggingface/label-files"
__lowercase = "imagenet-1k-id2label.json"
__lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
__lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
__lowercase = img_size
__lowercase = num_classes
__lowercase = embed_dim
__lowercase = depths
__lowercase = num_heads
__lowercase = window_size
return config
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
if "patch_embed.proj" in name:
__lowercase = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
__lowercase = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
__lowercase = "encoder." + name
if "attn.proj" in name:
__lowercase = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
__lowercase = name.replace("attn" , "attention.self" )
if "norm1" in name:
__lowercase = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
__lowercase = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
__lowercase = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__lowercase = name.replace("mlp.fc2" , "output.dense" )
if "q_bias" in name:
__lowercase = name.replace("q_bias" , "query.bias" )
if "k_bias" in name:
__lowercase = name.replace("k_bias" , "key.bias" )
if "v_bias" in name:
__lowercase = name.replace("v_bias" , "value.bias" )
if "cpb_mlp" in name:
__lowercase = name.replace("cpb_mlp" , "continuous_position_bias_mlp" )
if name == "norm.weight":
__lowercase = "layernorm.weight"
if name == "norm.bias":
__lowercase = "layernorm.bias"
if "head" in name:
__lowercase = name.replace("head" , "classifier" )
else:
__lowercase = "swinv2." + name
return name
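# Example renames (illustrative, derived directly from the branches above):
# "patch_embed.proj.weight" -> "swinv2.embeddings.patch_embeddings.projection.weight"
# "head.weight"             -> "classifier.weight"  (the classification head gets no "swinv2." prefix)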
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for key in orig_state_dict.copy().keys():
__lowercase = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
if "mask" in key:
continue
elif "qkv" in key:
__lowercase = key.split("." )
__lowercase = int(key_split[1] )
__lowercase = int(key_split[3] )
__lowercase = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__lowercase = val[:dim, :]
__lowercase = val[dim : dim * 2, :]
__lowercase = val[-dim:, :]
else:
__lowercase = val[:dim]
__lowercase = val[
dim : dim * 2
]
__lowercase = val[-dim:]
else:
__lowercase = val
return orig_state_dict
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE )
timm_model.eval()
__lowercase = get_swinva_config(_SCREAMING_SNAKE_CASE )
__lowercase = SwinvaForImageClassification(_SCREAMING_SNAKE_CASE )
model.eval()
__lowercase = convert_state_dict(timm_model.state_dict() , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
__lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowercase = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_" , "-" ) ) )
__lowercase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
__lowercase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" )
__lowercase = timm_model(inputs["pixel_values"] )
__lowercase = model(**_SCREAMING_SNAKE_CASE ).logits
assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 )
print(F"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
model.push_to_hub(
repo_path_or_name=Path(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , organization="nandwalritik" , commit_message="Add model" , )
if __name__ == "__main__":
snake_case__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swinv2_name""",
default="""swinv2_tiny_patch4_window8_256""",
type=str,
help="""Name of the Swinv2 timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
snake_case__ : str = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 655 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Optional[int] = ["""image_processor""", """tokenizer"""]
_snake_case : Tuple = """CLIPImageProcessor"""
_snake_case : Dict = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self : List[Any] , lowerCamelCase : Optional[Any]=None , lowerCamelCase : List[Any]=None , **lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowercase = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowerCamelCase , )
__lowercase = kwargs.pop("feature_extractor" )
__lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowerCamelCase , lowerCamelCase )
def __call__( self : Dict , lowerCamelCase : Dict=None , lowerCamelCase : Tuple=None , lowerCamelCase : Any=None , **lowerCamelCase : Any ):
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
__lowercase = self.tokenizer(lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase )
if images is not None:
__lowercase = self.image_processor(lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase )
if text is not None and images is not None:
__lowercase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase ) , tensor_type=lowerCamelCase )
def _snake_case ( self : List[str] , *lowerCamelCase : Tuple , **lowerCamelCase : str ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase , **lowerCamelCase )
def _snake_case ( self : Any , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : List[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase , **lowerCamelCase )
@property
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowercase = self.tokenizer.model_input_names
__lowercase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _snake_case ( self : str ):
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowerCamelCase , )
return self.image_processor_class
@property
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowerCamelCase , )
return self.image_processor
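# Hedged usage sketch (the class above mirrors transformers' CLIPProcessor; names assumed):
# processor = CLIPProcessor(image_processor=image_processor, tokenizer=tokenizer)
# batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# When both text and images are given, the __call__ logic above merges them, so the
# returned encoding carries input_ids, attention_mask, and pixel_values.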
| 655 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
snake_case__ : List[str] = logging.get_logger(__name__)
snake_case__ : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
snake_case__ : Optional[Any] = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
snake_case__ : List[str] = {
"""allenai/led-base-16384""": 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def snake_case_ ( ):
__lowercase = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
__lowercase = bs[:]
__lowercase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_SCREAMING_SNAKE_CASE )
cs.append(2**8 + n )
n += 1
__lowercase = [chr(_SCREAMING_SNAKE_CASE ) for n in cs]
return dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
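# Sketch of the mapping above: every byte 0-255 gets a printable unicode character.
# Printable bytes map to themselves; the rest are shifted past 255, e.g. the space
# byte (32) maps to chr(256 + 32) == "Ġ", which is why GPT-2-style vocabularies are
# full of "Ġ"-prefixed tokens.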
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = set()
__lowercase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowercase = char
return pairs
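# Example: get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")} -- the set of adjacent symbol
# pairs that the BPE loop in this tokenizer scores against self.bpe_ranks.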
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : List[str] = VOCAB_FILES_NAMES
_snake_case : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Union[str, Any] = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , lowerCamelCase : Tuple , lowerCamelCase : Tuple , lowerCamelCase : Optional[int]="replace" , lowerCamelCase : Dict="<s>" , lowerCamelCase : Dict="</s>" , lowerCamelCase : Optional[Any]="</s>" , lowerCamelCase : Any="<s>" , lowerCamelCase : List[str]="<unk>" , lowerCamelCase : Union[str, Any]="<pad>" , lowerCamelCase : Any="<mask>" , lowerCamelCase : str=False , **lowerCamelCase : Optional[Any] , ):
'''simple docstring'''
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else bos_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else eos_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else sep_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else cls_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else unk_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
super().__init__(
errors=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , )
with open(lowerCamelCase , encoding="utf-8" ) as vocab_handle:
__lowercase = json.load(lowerCamelCase )
__lowercase = {v: k for k, v in self.encoder.items()}
__lowercase = errors # how to handle errors in decoding
__lowercase = bytes_to_unicode()
__lowercase = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase , encoding="utf-8" ) as merges_handle:
__lowercase = merges_handle.read().split("\n" )[1:-1]
__lowercase = [tuple(merge.split() ) for merge in bpe_merges]
__lowercase = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
__lowercase = {}
__lowercase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__lowercase = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
return len(self.encoder )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self : List[Any] , lowerCamelCase : str ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
__lowercase = tuple(lowerCamelCase )
__lowercase = get_pairs(lowerCamelCase )
if not pairs:
return token
while True:
__lowercase = min(lowerCamelCase , key=lambda lowerCamelCase : self.bpe_ranks.get(lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__lowercase , __lowercase = bigram
__lowercase = []
__lowercase = 0
while i < len(lowerCamelCase ):
try:
__lowercase = word.index(lowerCamelCase , lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowercase = j
if word[i] == first and i < len(lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowercase = tuple(lowerCamelCase )
__lowercase = new_word
if len(lowerCamelCase ) == 1:
break
else:
__lowercase = get_pairs(lowerCamelCase )
__lowercase = " ".join(lowerCamelCase )
__lowercase = word
return word
def _snake_case ( self : List[Any] , lowerCamelCase : Tuple ):
'''simple docstring'''
__lowercase = []
for token in re.findall(self.pat , lowerCamelCase ):
__lowercase = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase ).split(" " ) )
return bpe_tokens
def _snake_case ( self : Dict , lowerCamelCase : Optional[int] ):
'''simple docstring'''
return self.encoder.get(lowerCamelCase , self.encoder.get(self.unk_token ) )
def _snake_case ( self : str , lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return self.decoder.get(lowerCamelCase )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : int ):
'''simple docstring'''
__lowercase = "".join(lowerCamelCase )
__lowercase = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _snake_case ( self : Optional[Any] , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase = os.path.join(
lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__lowercase = os.path.join(
lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase , ensure_ascii=lowerCamelCase ) + "\n" )
__lowercase = 0
with open(lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase : lowerCamelCase[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
__lowercase = token_index
writer.write(" ".join(lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def _snake_case ( self : Tuple , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowercase = [self.cls_token_id]
__lowercase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self : str , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase )) + [1]
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]
def _snake_case ( self : int , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self : Dict , lowerCamelCase : Any , lowerCamelCase : Tuple=False , **lowerCamelCase : Any ):
'''simple docstring'''
__lowercase = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase ) > 0 and not text[0].isspace()):
__lowercase = " " + text
return (text, kwargs)
def _snake_case ( self : List[Any] , lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase : Optional[int] = None , lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , ):
'''simple docstring'''
__lowercase = super()._pad(
encoded_inputs=lowerCamelCase , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
__lowercase = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__lowercase = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as the other (sequential) inputs.
__lowercase = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase )
if needs_to_be_padded:
__lowercase = len(lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__lowercase = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
__lowercase = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
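# Padding-semantics sketch for the override above: in LED, 0 in global_attention_mask
# means local attention and 1 means global attention, so padded positions are filled
# with -1 (rather than 0) to stay distinguishable from real local-attention tokens.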
| 655 | 1 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
snake_case__ : Optional[Any] = logging.get_logger(__name__)
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ):
# Recurse if needed
if "." in tensor_name:
__lowercase = tensor_name.split("." )
for split in splits[:-1]:
__lowercase = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
__lowercase = new_module
__lowercase = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
__lowercase = tensor_name in module._buffers
__lowercase = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if old_value.device == torch.device("meta" ) and device not in ["meta", torch.device("meta" )] and value is None:
raise ValueError(F"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
__lowercase = False
__lowercase = False
if is_buffer or not is_bitsandbytes_available():
__lowercase = False
__lowercase = False
else:
__lowercase = hasattr(bnb.nn , "Params4bit" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
__lowercase = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
__lowercase = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
__lowercase = old_value.to(_SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
__lowercase = value.to("cpu" )
if value.dtype == torch.inta:
__lowercase = version.parse(importlib.metadata.version("bitsandbytes" ) ) > version.parse(
"0.37.2" )
if not is_abit_serializable:
raise ValueError(
"Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
"Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." )
else:
__lowercase = torch.tensor(_SCREAMING_SNAKE_CASE , device="cpu" )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , _SCREAMING_SNAKE_CASE ) and fpaa_statistics is None:
__lowercase = new_value.T
__lowercase = old_value.__dict__
if is_abit:
__lowercase = bnb.nn.IntaParams(_SCREAMING_SNAKE_CASE , requires_grad=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
elif is_abit:
__lowercase = bnb.nn.Paramsabit(_SCREAMING_SNAKE_CASE , requires_grad=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
__lowercase = new_value
if fpaa_statistics is not None:
setattr(module.weight , "SCB" , fpaa_statistics.to(_SCREAMING_SNAKE_CASE ) )
else:
if value is None:
__lowercase = old_value.to(_SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
__lowercase = value.to(_SCREAMING_SNAKE_CASE )
else:
__lowercase = torch.tensor(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE )
if is_buffer:
__lowercase = new_value
else:
__lowercase = nn.Parameter(_SCREAMING_SNAKE_CASE , requires_grad=old_value.requires_grad )
__lowercase = new_value
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False ):
for name, module in model.named_children():
if current_key_name is None:
__lowercase = []
current_key_name.append(_SCREAMING_SNAKE_CASE )
if (isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) or isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in ".".join(_SCREAMING_SNAKE_CASE ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase , __lowercase = module.weight.shape
else:
__lowercase = module.in_features
__lowercase = module.out_features
if quantization_config.quantization_method() == "llm_int8":
__lowercase = bnb.nn.LinearabitLt(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
__lowercase = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
__lowercase = bnb.nn.Linearabit(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
__lowercase = True
# Store the module class in case we need to transpose the weight later
__lowercase = type(_SCREAMING_SNAKE_CASE )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(_SCREAMING_SNAKE_CASE )
if len(list(module.children() ) ) > 0:
__lowercase , __lowercase = _replace_with_bnb_linear(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , has_been_replaced=_SCREAMING_SNAKE_CASE , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ):
__lowercase = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
__lowercase , __lowercase = _replace_with_bnb_linear(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug." )
return model
def snake_case_ ( *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
warnings.warn(
"`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead" , _SCREAMING_SNAKE_CASE , )
return replace_with_bnb_linear(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def snake_case_ ( *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
warnings.warn(
"`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead" , _SCREAMING_SNAKE_CASE , )
return set_module_quantized_tensor_to_device(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = deepcopy(_SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
__lowercase = find_tied_parameters(_SCREAMING_SNAKE_CASE )
# For compatibility with Accelerate < 0.18
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
__lowercase = sum(_SCREAMING_SNAKE_CASE , [] )
__lowercase = len(_SCREAMING_SNAKE_CASE ) > 0
# Check if it is a base model
__lowercase = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
__lowercase = list(model.named_children() )
__lowercase = [list_modules[-1][0]]
# add last module together with tied weights
__lowercase = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE )
__lowercase = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE )
# remove ".weight" from the keys
__lowercase = [".weight", ".bias"]
__lowercase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
__lowercase = name.replace(_SCREAMING_SNAKE_CASE , "" )
filtered_module_names.append(_SCREAMING_SNAKE_CASE )
return filtered_module_names
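# Hedged usage sketch (assuming a standard causal LM such as GPT-2): the function above
# typically returns the output head (e.g. "lm_head") plus any modules whose weights are
# tied to it, so those stay in full precision instead of being replaced by quantized
# bitsandbytes linear layers.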
| 655 |
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError("The length of profit and weight must be same." )
if max_weight <= 0:
raise ValueError("max_weight must greater than zero." )
if any(p < 0 for p in profit ):
raise ValueError("Profit can not be negative." )
if any(w < 0 for w in weight ):
raise ValueError("Weight can not be negative." )
# Build the profit gained per unit weight (profit / weight) for each item,
# in the same order as the inputs.
__lowercase = [p / w for p, w in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]
# Creating a copy of the list and sorting profit/weight in ascending order
__lowercase = sorted(_SCREAMING_SNAKE_CASE )
# declaring useful variables
__lowercase = len(_SCREAMING_SNAKE_CASE )
__lowercase = 0
__lowercase = 0
__lowercase = 0
# Greedily fill the knapsack until the capacity (e.g. 15 kg) is reached or all items are used.
while limit <= max_weight and i < length:
# Pick the largest remaining profit/weight ratio from the sorted copy.
__lowercase = sorted_profit_by_weight[length - i - 1]
__lowercase = profit_by_weight.index(_SCREAMING_SNAKE_CASE )
__lowercase = -1
# Check whether the remaining capacity can still hold the whole item.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Take the whole item: the fraction used is 1 == weight[index] / weight[index].
gain += 1 * profit[index]
else:
# The whole item no longer fits: take only the remaining capacity and credit
# the proportional profit, (max_weight - limit) / weight[index] * profit[index].
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
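# Hedged worked example (upstream name assumed: calc_profit):
# calc_profit([10, 9, 8], [3, 4, 5], 15) -> 27    (every item fits whole)
# calc_profit([10, 9, 8], [3, 4, 5], 5)  -> 14.5  (item 0 whole, then 2 of the
# 4 kg of item 1, crediting 2 / 4 * 9 = 4.5)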
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
snake_case__ : str = [int(x) for x in input("""Input profits separated by spaces: """).split()]
snake_case__ : str = [int(x) for x in input("""Input weights separated by spaces: """).split()]
snake_case__ : Optional[Any] = int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight)
| 655 | 1 |
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
if _SCREAMING_SNAKE_CASE < 0:
raise ValueError("Input value must be a positive integer" )
elif isinstance(_SCREAMING_SNAKE_CASE , float ):
raise TypeError("Input value must be a 'int' type" )
return bin(_SCREAMING_SNAKE_CASE ).count("1" )
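# Worked example: bin(13) == "0b1101", so the function above returns 3 set bits;
# 0 maps to 0 and 255 maps to 8.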
if __name__ == "__main__":
import doctest
doctest.testmod()
| 655 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Dict = """openai/whisper-base"""
_snake_case : Union[str, Any] = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
_snake_case : Any = """transcriber"""
_snake_case : Any = WhisperProcessor
_snake_case : Optional[int] = WhisperForConditionalGeneration
_snake_case : str = ["""audio"""]
_snake_case : Optional[int] = ["""text"""]
def _snake_case ( self : List[str] , lowerCamelCase : Optional[int] ):
'''simple docstring'''
return self.pre_processor(lowerCamelCase , return_tensors="pt" ).input_features
def _snake_case ( self : str , lowerCamelCase : List[Any] ):
'''simple docstring'''
return self.model.generate(inputs=lowerCamelCase )
def _snake_case ( self : List[str] , lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return self.pre_processor.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )[0]
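# Hedged usage sketch (upstream name assumed: SpeechToTextTool; Whisper expects a
# 16 kHz waveform): PipelineTool.__call__ chains the three methods above, i.e.
# encode -> forward -> decode, so tool(audio) returns the transcribed string directly.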
| 655 | 1 |
import sys
snake_case__ : Optional[int] = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = 1
for digit in s:
product *= int(_SCREAMING_SNAKE_CASE )
return product
def snake_case_ ( _SCREAMING_SNAKE_CASE = snake_case__ ):
    # Project Euler 8: find the thirteen adjacent digits with the greatest product.
    __lowercase = -sys.maxsize - 1
    __lowercase = _SCREAMING_SNAKE_CASE[:1_3]
    __lowercase = 1_3
    while cur_index < len(_SCREAMING_SNAKE_CASE ) - 1_3:
        if int(_SCREAMING_SNAKE_CASE[cur_index] ) >= int(substr[0] ):
            __lowercase = substr[1:] + _SCREAMING_SNAKE_CASE[cur_index]
            cur_index += 1
        else:
            __lowercase = max(largest_product , str_eval(substr ) )
            __lowercase = _SCREAMING_SNAKE_CASE[cur_index : cur_index + 1_3]
            cur_index += 1_3
    return largest_product
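# Illustrative note on the loop above: while the incoming digit is at least as
# large as the digit about to leave the 13-digit window, the window slides one
# position without being scored; otherwise the current window is scored with
# str_eval and the scan jumps 13 digits ahead to start a fresh window.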
if __name__ == "__main__":
print(F'''{solution() = }''')
| 655 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class _A :
'''simple docstring'''
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
__lowercase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
__lowercase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _snake_case ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
__lowercase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
__lowercase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
__lowercase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = inputs["prompt"]
__lowercase = inputs["generator"]
__lowercase = inputs["num_inference_steps"]
__lowercase = inputs["output_type"]
if "image" in inputs:
__lowercase = inputs["image"]
else:
__lowercase = None
if "mask_image" in inputs:
__lowercase = inputs["mask_image"]
else:
__lowercase = None
if "original_image" in inputs:
__lowercase = inputs["original_image"]
else:
__lowercase = None
__lowercase , __lowercase = pipe.encode_prompt(lowerCamelCase )
# inputs with prompt converted to embeddings
__lowercase = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
__lowercase = image
if mask_image is not None:
__lowercase = mask_image
if original_image is not None:
__lowercase = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(lowerCamelCase , lowerCamelCase , lowerCamelCase )
__lowercase = pipe(**lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCamelCase )
__lowercase = self.pipeline_class.from_pretrained(lowerCamelCase )
pipe_loaded.to(lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=lowerCamelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowerCamelCase , lowerCamelCase ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , )
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = inputs["generator"]
__lowercase = inputs["num_inference_steps"]
__lowercase = inputs["output_type"]
# inputs with prompt converted to embeddings
__lowercase = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
__lowercase = image
if mask_image is not None:
__lowercase = mask_image
if original_image is not None:
__lowercase = original_image
__lowercase = pipe_loaded(**lowerCamelCase )[0]
__lowercase = np.abs(to_np(lowerCamelCase ) - to_np(lowerCamelCase ) ).max()
self.assertLess(lowerCamelCase , 1e-4 )
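    # The helper above boils down to this save/load round-trip pattern
    # (condensed sketch with illustrative names, not part of the original file):
    #   out_before = pipe(**inputs)[0]
    #   with tempfile.TemporaryDirectory() as tmpdir:
    #       pipe.save_pretrained(tmpdir)
    #       out_after = self.pipeline_class.from_pretrained(tmpdir)(**inputs)[0]
    #   assert np.abs(to_np(out_before) - to_np(out_after)).max() < 1e-4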
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = pipe(**lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCamelCase )
__lowercase = self.pipeline_class.from_pretrained(lowerCamelCase )
pipe_loaded.to(lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=lowerCamelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = pipe_loaded(**lowerCamelCase )[0]
__lowercase = np.abs(to_np(lowerCamelCase ) - to_np(lowerCamelCase ) ).max()
self.assertLess(lowerCamelCase , 1e-4 )
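# Usage sketch (illustrative; `IFPipelineFastTests` and `IFPipeline` are
# assumed names, not part of this file): the class above is written as a test
# mixin, so a concrete test case is expected to provide `pipeline_class` and
# `get_dummy_inputs`, e.g.
#
#   class IFPipelineFastTests(_A, unittest.TestCase):
#       pipeline_class = IFPipeline
#       def get_dummy_inputs(self, device):
#           ...  # returns prompt/generator/num_inference_steps/output_type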
| 655 | 1 |