| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 87–55.2k | int64 0–349 | stringlengths 135–49.1k | int64 0–349 | int64 0–1 |
from typing import Callable, List, Optional, Tuple, Union

import torch
from transformers import CLIPTextModel, CLIPTokenizer

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using VQ-Diffusion."""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()
        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """
        Truncate `log_p_x_0` so that, per column, the kept probabilities sum to at most
        `truncation_rate`; the lowest probabilities beyond that threshold are set to zero (log(0)).
        """
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        # undo the sort so the mask lines up with the original ordering
        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
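
# Usage sketch: a minimal way to drive this pipeline, assuming the
# "microsoft/vq-diffusion-ithq" checkpoint is available; the truncation_rate
# value here is illustrative, not prescriptive.
#
#   import torch
#   from diffusers import VQDiffusionPipeline
#
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq", torch_dtype=torch.float16).to("cuda")
#   image = pipe("teddy bear playing in the pool", truncation_rate=0.86).images[0]
#   image.save("teddy_bear.png")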
code_codestyle: 104
from typing import List

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}


class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
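
# Usage sketch: constructing the config directly, then a model from it (assumes an
# EfficientFormer model class is exported by the same library; values illustrative):
#
#   from transformers import EfficientFormerConfig, EfficientFormerModel
#
#   config = EfficientFormerConfig(hidden_sizes=[48, 96, 224, 448], num_attention_heads=8)
#   model = EfficientFormerModel(config)
#   assert model.config.hidden_act == "gelu"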
style_context_codestyle: 29, label: 0
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_bigbird_pegasus": [
"BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BigBirdPegasusConfig",
"BigBirdPegasusOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
"BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
"BigBirdPegasusForCausalLM",
"BigBirdPegasusForConditionalGeneration",
"BigBirdPegasusForQuestionAnswering",
"BigBirdPegasusForSequenceClassification",
"BigBirdPegasusModel",
"BigBirdPegasusPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
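
# What the lazy pattern above buys: importing this package is cheap, and the heavy
# torch-backed submodule is only imported when one of its names is first accessed.
# A stripped-down sketch of the idea (illustrative, not the actual _LazyModule internals):
#
#   import importlib, types
#
#   class MiniLazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._attr_to_submodule = {
#               attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
#           }
#
#       def __getattr__(self, attr):
#           submodule = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
#           value = getattr(submodule, attr)
#           setattr(self, attr, value)  # cache so __getattr__ is not hit again
#           return value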
code_codestyle: 85
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output class for ShapEImg2ImgPipeline."""

    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    """Pipeline for generating latent representations of a 3D asset from an image, using Shap-E."""

    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ) -> None:
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and image embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
style_context_codestyle: 29, label: 0
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
@unittest.skip(reason="UperNet does not have tied weights" )
def _lowerCAmelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
pass
@slow
def _lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Tuple = UperNetForSemanticSegmentation.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

code_codestyle: 145
import random
import unittest

import torch

from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
style_context_codestyle: 29, label: 0
"""simple docstring"""
from collections.abc import Sequence
def __lowerCAmelCase ( lowercase : Sequence[int] | None = None ) -> Union[str, Any]:
"""simple docstring"""
if nums is None or not nums:
raise ValueError("Input sequence should not be empty" )
snake_case : Optional[int] = nums[0]
for i in range(1 , len(__snake_case ) ):
snake_case : Dict = nums[i]
snake_case : Optional[Any] = max(__snake_case , ans + num , __snake_case )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
__snake_case = int(input("""Enter number of elements : """).strip())
__snake_case = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n]
print(max_subsequence_sum(array))
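
# A quick worked example of the recurrence above (values checked by hand):
# nums = [3, -5, 4, 2]
#   start:    ans = 3
#   num = -5: max(3, 3 - 5, -5) -> 3
#   num =  4: max(3, 3 + 4,  4) -> 7   (subsequence [3, 4]; contiguity is not required)
#   num =  2: max(7, 7 + 2,  2) -> 9   (subsequence [3, 4, 2], i.e. the sum of all positives)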
code_codestyle: 203
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 29, label: 0
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """Output of AutoencoderKL encoding method."""

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    """Variational Autoencoder (VAE) model with KL loss."""

    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ) -> None:
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        """Enable tiled VAE encoding/decoding for large images."""
        self.use_tiling = use_tiling

    def disable_tiling(self):
        """Disable tiled VAE encoding/decoding."""
        self.enable_tiling(False)

    def enable_slicing(self):
        """Enable sliced VAE decoding (one batch element at a time)."""
        self.use_slicing = True

    def disable_slicing(self):
        """Disable sliced VAE decoding."""
        self.use_slicing = False
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        """Return all attention processors used in the model, indexed by weight name."""
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        """Set the attention processor(s) to use to compute attention."""
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """Disable custom attention processors and set the default attention implementation."""
        self.set_attn_processor(AttnProcessor())
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)
    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        """Encode a batch of images with a tiled encoder; overlapping tiles are blended to hide seams."""
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)
    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        """Decode a batch of latents with a tiled decoder."""
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
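
# Usage sketch for the tiling switch above (assumes a Stable Diffusion checkpoint
# whose "vae" subfolder holds AutoencoderKL weights; the shape is illustrative):
#
#   import torch
#   from diffusers import AutoencoderKL
#
#   vae = AutoencoderKL.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="vae")
#   vae.enable_tiling()  # inputs larger than tile_sample_min_size go through tiled_encode/tiled_decode
#   with torch.no_grad():
#       posterior = vae.encode(torch.randn(1, 3, 1024, 1024)).latent_dist
#       reconstruction = vae.decode(posterior.sample()).sample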
code_codestyle: 90
import os
import shutil
from pathlib import Path
from typing import Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging


if is_onnx_available():
    import onnxruntime as ort


logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}


class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Load an ONNX inference session with an ExecutionProvider."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str]] = None,
        revision: Optional[str] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
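
# Usage sketch (hypothetical path; the keyword argument names passed to the model
# must match the input names of the exported ONNX graph):
#
#   import numpy as np
#
#   model = OnnxRuntimeModel.from_pretrained("path/to/onnx_model_dir", provider="CPUExecutionProvider")
#   outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))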
style_context_codestyle: 29, label: 0
"""simple docstring"""
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowercase () -> Tuple:
'''simple docstring'''
lowerCAmelCase = 9, 14 # noqa: F841
lowerCAmelCase = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
lowerCAmelCase = defaultdict(__snake_case )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
lowerCAmelCase = mst(__snake_case )
lowerCAmelCase = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
lowerCAmelCase = tuple(answer[:2] )
lowerCAmelCase = tuple(edge[::-1] )
assert edge in result or reverse in result
code_codestyle: 155
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile

import pyarrow as pa
import pyarrow.parquet as pq
import pytest

import datasets
import datasets.config
@pytest.fixture(scope="session")
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset
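
# How a test would consume the session-scoped fixture above (hypothetical test
# function; pytest injects the `dataset` fixture by parameter name):
#
#   def test_dataset_num_rows(dataset):
#       assert dataset.num_rows == 10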
@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename


# FILE_CONTENT + files


FILE_CONTENT = """\
    Text data.
    Second line of data."""


@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename
@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path
@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
        """\
    <?xml version="1.0" encoding="UTF-8" ?>
    <tmx version="1.4">
      <header segtype="sentence" srclang="ca" />
      <body>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
          <tuv xml:lang="en"><seg>Content 1</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
          <tuv xml:lang="en"><seg>Content 2</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
          <tuv xml:lang="en"><seg>Content 3</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
          <tuv xml:lang="en"><seg>Content 4</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
          <tuv xml:lang="en"><seg>Content 5</seg></tuv>
        </tu>
      </body>
    </tmx>"""
    )
    with open(filename, "w") as f:
        f.write(data)
    return filename
DATA = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
DATA2 = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
DATA_DICT_OF_LISTS = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = datasets.Dataset.from_dict(__snake_case )
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlite3.connect(__snake_case ) ) as con:
UpperCAmelCase_ : List[Any] = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(__snake_case , 'w' , newline='' ) as f:
UpperCAmelCase_ : Tuple = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(__snake_case , 'w' , newline='' ) as f:
UpperCAmelCase_ : Optional[Any] = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any ):
'''simple docstring'''
    import bz2
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(__snake_case , 'rb' ) as f:
UpperCAmelCase_ : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] , __snake_case : Tuple , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(__snake_case , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : int , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
UpperCAmelCase_ : Dict = pa.schema(
{
'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
} )
with open(__snake_case , 'wb' ) as f:
UpperCAmelCase_ : List[Any] = pq.ParquetWriter(__snake_case , schema=__snake_case )
UpperCAmelCase_ : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__snake_case ) )] for k in DATA[0]} , schema=__snake_case )
writer.write_table(__snake_case )
writer.close()
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase_ : Optional[int] = {'data': DATA}
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase_ : Tuple = {'data': DATA_DICT_OF_LISTS}
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Dict ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(__snake_case , 'rb' ) as orig_file:
with gzip.open(__snake_case , 'wb' ) as zipped_file:
zipped_file.writelines(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int , __snake_case : Any ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(__snake_case , 'rb' ) as orig_file:
with gzip.open(__snake_case , 'wb' ) as zipped_file:
zipped_file.writelines(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : str , __snake_case : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : str , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any , __snake_case : Any , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = ['0', '1', '2', '3']
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = ['0', '1', '2', '3']
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ['0', '1', '2', '3']
UpperCAmelCase_ : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : str , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename('unsupported.ext' ) )
f.write(__snake_case , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(__snake_case , 'w' , encoding='utf-8' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
| 29 | 0 |
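# A minimal sketch (not part of the original conftest) of how session-scoped
# fixtures like the ones above are consumed: the fixture builds a file once
# per test session and each test receives its path. Names are illustrative.
import csv
import pytest
@pytest.fixture(scope='session')
def tiny_csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('demo') / 'tiny.csv')
    with open(path, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=['col_1', 'col_2'])
        writer.writeheader()
        writer.writerow({'col_1': '0', 'col_2': 0})
    return path
def test_tiny_csv(tiny_csv_path):
    with open(tiny_csv_path) as f:
        rows = list(csv.DictReader(f))
    assert rows == [{'col_1': '0', 'col_2': '0'}]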
"""simple docstring"""
def euclidean_gcd(a: int , b: int ) -> int:
    while b:
        a, b = b, a % b
    return a
def euclidean_gcd_recursive(a: int , b: int ) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b , a % b )
def main():
print(F'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(F'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(F'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(F'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(F'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(F'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(F'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(F'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(F'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(F'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main() | 74 |
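# Quick sanity checks for the two gcd variants above (using the reconstructed
# names euclidean_gcd / euclidean_gcd_recursive): gcd is symmetric in its
# arguments and gcd(a, 0) == a.
assert euclidean_gcd(3, 5) == euclidean_gcd(5, 3) == 1
assert euclidean_gcd(12, 18) == 6
assert euclidean_gcd_recursive(12, 18) == 6
assert euclidean_gcd_recursive(7, 0) == 7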
from __future__ import annotations
def get_valid_pos(position: tuple[int, int] , n: int ):
    '''simple docstring'''
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position )
    return permissible_positions
def is_complete(board: list[list[int]] ):
    '''simple docstring'''
    return not any(elem == 0 for row in board for elem in row )
def open_knight_tour_helper(board: list[list[int]] , pos: tuple[int, int] , curr: int ):
    '''simple docstring'''
    if is_complete(board ):
        return True
    for position in get_valid_pos(pos , len(board ) ):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board , position , curr + 1 ):
                return True
            board[y][x] = 0
    return False
def open_knight_tour(n: int ):
    '''simple docstring'''
    board = [[0 for i in range(n )] for j in range(n )]
    for i in range(n ):
        for j in range(n ):
            board[i][j] = 1
            if open_knight_tour_helper(board , (i, j) , 1 ):
                return board
            board[i][j] = 0
    msg = F"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 0 |
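# Usage sketch for the backtracking solver above: a 5x5 board admits an open
# knight's tour, and the returned board numbers the visited squares 1..25.
board = open_knight_tour(5)
assert sorted(cell for row in board for cell in row) == list(range(1, 26))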
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A : Any = logging.get_logger(__name__)
def _a ( UpperCAmelCase ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__ : List[Any] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowerCamelCase__ : Tuple = 192
lowerCamelCase__ : str = 768
lowerCamelCase__ : List[Any] = 12
lowerCamelCase__ : Optional[int] = 3
lowerCamelCase__ : Optional[Any] = [800, 1333]
lowerCamelCase__ : Dict = False
elif yolos_name == "yolos_s_dWr":
lowerCamelCase__ : Tuple = 330
lowerCamelCase__ : List[str] = 14
lowerCamelCase__ : Tuple = 6
lowerCamelCase__ : List[str] = 1320
elif "yolos_s" in yolos_name:
lowerCamelCase__ : Any = 384
lowerCamelCase__ : Union[str, Any] = 1536
lowerCamelCase__ : List[str] = 12
lowerCamelCase__ : Any = 6
elif "yolos_b" in yolos_name:
lowerCamelCase__ : Optional[int] = [800, 1344]
lowerCamelCase__ : str = 91
lowerCamelCase__ : Optional[Any] = 'huggingface/label-files'
lowerCamelCase__ : Any = 'coco-detection-id2label.json'
lowerCamelCase__ : Tuple = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type='''dataset''' ) , '''r''' ) )
lowerCamelCase__ : Optional[Any] = {int(__snake_case ): v for k, v in idalabel.items()}
lowerCamelCase__ : Optional[Any] = idalabel
lowerCamelCase__ : List[Any] = {v: k for k, v in idalabel.items()}
return config
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = False ) -> Optional[int]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase__ : Optional[Any] = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
lowerCamelCase__ : Tuple = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ : Tuple = in_proj_weight[: config.hidden_size, :]
lowerCamelCase__ : Optional[int] = in_proj_bias[: config.hidden_size]
lowerCamelCase__ : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__ : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase__ : List[str] = in_proj_weight[-config.hidden_size :, :]
lowerCamelCase__ : str = in_proj_bias[-config.hidden_size :]
def _a ( UpperCAmelCase ) -> List[str]:
"""simple docstring"""
if "backbone" in name:
lowerCamelCase__ : List[str] = name.replace('''backbone''' , '''vit''' )
if "cls_token" in name:
lowerCamelCase__ : List[Any] = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "det_token" in name:
lowerCamelCase__ : Dict = name.replace('''det_token''' , '''embeddings.detection_tokens''' )
if "mid_pos_embed" in name:
lowerCamelCase__ : Optional[int] = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' )
if "pos_embed" in name:
lowerCamelCase__ : List[str] = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
lowerCamelCase__ : Any = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "blocks" in name:
lowerCamelCase__ : List[str] = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
lowerCamelCase__ : List[str] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
lowerCamelCase__ : Dict = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
lowerCamelCase__ : List[Any] = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowerCamelCase__ : List[Any] = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowerCamelCase__ : List[Any] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowerCamelCase__ : str = name.replace('''mlp.fc2''' , '''output.dense''' )
if "class_embed" in name:
lowerCamelCase__ : Any = name.replace('''class_embed''' , '''class_labels_classifier''' )
if "bbox_embed" in name:
lowerCamelCase__ : Dict = name.replace('''bbox_embed''' , '''bbox_predictor''' )
if "vit.norm" in name:
lowerCamelCase__ : Any = name.replace('''vit.norm''' , '''vit.layernorm''' )
return name
def _a ( UpperCAmelCase , UpperCAmelCase ) -> int:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowerCamelCase__ : List[str] = orig_state_dict.pop(__snake_case )
if "qkv" in key:
lowerCamelCase__ : Any = key.split('''.''' )
lowerCamelCase__ : Optional[int] = int(key_split[2] )
lowerCamelCase__ : int = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowerCamelCase__ : str = val[:dim, :]
lowerCamelCase__ : str = val[
dim : dim * 2, :
]
lowerCamelCase__ : int = val[-dim:, :]
else:
lowerCamelCase__ : List[Any] = val[:dim]
lowerCamelCase__ : Optional[Any] = val[dim : dim * 2]
lowerCamelCase__ : List[str] = val[-dim:]
else:
lowerCamelCase__ : List[str] = val
return orig_state_dict
def _a ( ) -> str:
"""simple docstring"""
lowerCamelCase__ : Optional[int] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCamelCase__ : Any = Image.open(requests.get(__snake_case , stream=__snake_case ).raw )
return im
@torch.no_grad()
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = False ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__ : str = get_yolos_config(__snake_case )
# load original state_dict
lowerCamelCase__ : Tuple = torch.load(__snake_case , map_location='''cpu''' )['model']
# load 🤗 model
lowerCamelCase__ : Optional[Any] = YolosForObjectDetection(__snake_case )
model.eval()
lowerCamelCase__ : List[str] = convert_state_dict(__snake_case , __snake_case )
model.load_state_dict(__snake_case )
# Check outputs on an image, prepared by YolosImageProcessor
lowerCamelCase__ : int = 800 if yolos_name != 'yolos_ti' else 512
lowerCamelCase__ : str = YolosImageProcessor(format='''coco_detection''' , size=__snake_case )
lowerCamelCase__ : Optional[Any] = image_processor(images=prepare_img() , return_tensors='''pt''' )
lowerCamelCase__ : Optional[Any] = model(**__snake_case )
    logits, pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
        expected_slice_boxes = torch.tensor(
            [[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
        expected_slice_boxes = torch.tensor(
            [[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
        expected_slice_boxes = torch.tensor(
            [[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
        expected_slice_boxes = torch.tensor(
            [[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
        expected_slice_boxes = torch.tensor(
            [[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}" )
    assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1E-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1E-4 )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__snake_case )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__snake_case )
if push_to_hub:
lowerCamelCase__ : List[str] = {
'yolos_ti': 'yolos-tiny',
'yolos_s_200_pre': 'yolos-small',
'yolos_s_300_pre': 'yolos-small-300',
'yolos_s_dWr': 'yolos-small-dwr',
'yolos_base': 'yolos-base',
}
print('''Pushing to the hub...''' )
lowerCamelCase__ : str = model_mapping[yolos_name]
image_processor.push_to_hub(__snake_case , organization='''hustvl''' )
model.push_to_hub(__snake_case , organization='''hustvl''' )
if __name__ == "__main__":
_A : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_A : Union[str, Any] = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 142 |
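# A minimal numeric sketch of the qkv-splitting step in the conversion script
# above: timm stores query/key/value as one fused (3*hidden, hidden) matrix,
# and the converter slices it into three (hidden, hidden) blocks. `hidden` is
# illustrative.
import torch
hidden = 4
qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q = qkv[:hidden, :]
k = qkv[hidden : hidden * 2, :]
v = qkv[-hidden:, :]
assert torch.equal(torch.cat([q, k, v], dim=0), qkv)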
def partition(m: int ):
    '''simple docstring'''
    memo: list[list[int]] = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1
    for n in range(m + 1 ):
        for k in range(1 , m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
__UpperCAmelCase = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
__UpperCAmelCase = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 29 | 0 |
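# Quick checks for the reconstructed partition() above: memo[n][k] counts
# partitions of n into parts of size at most k + 1, so memo[m][m - 1] is the
# partition number p(m).
assert partition(3) == 3  # 3, 2+1, 1+1+1
assert partition(5) == 7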
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs ):
    """simple docstring"""
    return 1.0 / (1.0 + np.exp(-_outputs ))
def softmax(_outputs ):
    """simple docstring"""
    maxes = np.max(_outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(_outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
class ClassificationFunction(ExplicitEnum ):
SCREAMING_SNAKE_CASE__ = '''sigmoid'''
SCREAMING_SNAKE_CASE__ = '''softmax'''
SCREAMING_SNAKE_CASE__ = '''none'''
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''' , )
class TextClassificationPipeline(Pipeline ):
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = ClassificationFunction.NONE
def __init__(self , **UpperCAmelCase ) -> Tuple:
super().__init__(**_UpperCamelCase )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def __A (self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="" , **UpperCAmelCase ) -> Dict:
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
_lowercase =tokenizer_kwargs
_lowercase ={}
if hasattr(self.model.config , '''return_all_scores''' ) and return_all_scores is None:
_lowercase =self.model.config.return_all_scores
if isinstance(_UpperCamelCase , _UpperCamelCase ) or top_k is None:
_lowercase =top_k
_lowercase =False
elif return_all_scores is not None:
warnings.warn(
'''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'''
''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , _UpperCamelCase , )
if return_all_scores:
_lowercase =None
else:
_lowercase =1
if isinstance(_UpperCamelCase , _UpperCamelCase ):
_lowercase =ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
_lowercase =function_to_apply
return preprocess_params, {}, postprocess_params
def __call__(self , *UpperCAmelCase , **UpperCAmelCase ) -> List[Any]:
_lowercase =super().__call__(*_UpperCamelCase , **_UpperCamelCase )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
_lowercase ='top_k' not in kwargs
if isinstance(args[0] , _UpperCamelCase ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def __A (self , UpperCAmelCase , **UpperCAmelCase ) -> Dict[str, GenericTensor]:
_lowercase =self.framework
if isinstance(_UpperCamelCase , _UpperCamelCase ):
return self.tokenizer(**_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) == 1 and isinstance(inputs[0] , _UpperCamelCase ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=_UpperCamelCase , **_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'''
''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' )
return self.tokenizer(_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase )
def __A (self , UpperCAmelCase ) -> Tuple:
return self.model(**_UpperCamelCase )
def __A (self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=1 , UpperCAmelCase=True ) -> str:
# `_legacy` is used to determine if we're running the naked pipeline and in backward
# compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
# the more natural result containing the list.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
_lowercase =ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
_lowercase =ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , '''function_to_apply''' ) and function_to_apply is None:
_lowercase =self.model.config.function_to_apply
else:
_lowercase =ClassificationFunction.NONE
_lowercase =model_outputs['logits'][0]
_lowercase =outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
_lowercase =sigmoid(_UpperCamelCase )
elif function_to_apply == ClassificationFunction.SOFTMAX:
_lowercase =softmax(_UpperCamelCase )
elif function_to_apply == ClassificationFunction.NONE:
_lowercase =outputs
else:
raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}" )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
_lowercase =[
{'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(_UpperCamelCase )
]
if not _legacy:
dict_scores.sort(key=lambda UpperCAmelCase : x["score"] , reverse=_UpperCamelCase )
if top_k is not None:
_lowercase =dict_scores[:top_k]
return dict_scores
| 5 |
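# A standalone numpy sketch of the two activation paths used in postprocess
# above: sigmoid for multi-label / single-logit outputs, softmax for
# single-label multi-class outputs. Mirrors the module-level helpers.
import numpy as np
logits = np.array([2.0, 0.5, -1.0])
shifted = np.exp(logits - logits.max())
probs = shifted / shifted.sum()
assert abs(probs.sum() - 1.0) < 1e-9        # softmax normalizes to 1
sig = 1.0 / (1.0 + np.exp(-logits))
assert ((0.0 < sig) & (sig < 1.0)).all()    # sigmoid maps into (0, 1)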
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class VisualQuestionAnsweringPipeline(Pipeline ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING )
    def _sanitize_parameters( self , top_k=None , padding=None , truncation=None , **kwargs ):
        preprocess_params , postprocess_params = {}, {}
        if padding is not None:
            preprocess_params['padding'] = padding
        if truncation is not None:
            preprocess_params['truncation'] = truncation
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , image , question = None , **kwargs ):
        if isinstance(image , (Image.Image, str) ) and isinstance(question , str ):
            inputs = {'image': image, 'question': question}
        else:
            # a dict (or list of dicts) with "image" and "question" keys was passed directly
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results
    def preprocess( self , inputs , padding=False , truncation=False ):
        image = load_image(inputs['image'] )
        model_inputs = self.tokenizer(
            inputs['question'] , return_tensors=self.framework , padding=padding , truncation=truncation )
        image_features = self.image_processor(images=image , return_tensors=self.framework )
        model_inputs.update(image_features )
        return model_inputs
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores , ids = probs.topk(top_k )
        else:
            raise ValueError(f"Unsupported framework: {self.framework}" )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 29 | 0 |
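# A torch-only sketch of the top-k scoring in postprocess above: sigmoid the
# logits, take the top_k, and pair scores with label ids. The label map is
# illustrative.
import torch
logits = torch.tensor([[2.0, -1.0, 0.5]])
probs = logits.sigmoid()[0]
scores, ids = probs.topk(2)
id2label = {0: 'cat', 1: 'dog', 2: 'bird'}
answers = [{'score': s.item(), 'answer': id2label[i.item()]} for s, i in zip(scores, ids)]
assert [a['answer'] for a in answers] == ['cat', 'bird']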
import heapq
def greedy_min_vertex_cover(graph: dict ) -> set:
    '''simple docstring'''
    queue = []
    # for each node and its adjacency list, add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue , [-1 * len(value ), (key, value)] )
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent nodes, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue )
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 252 |
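# Worked check for greedy_min_vertex_cover above (the algorithm mutates the
# adjacency lists, so the demo rebuilds the dict for the final assertion):
# every listed arc must end up with at least one endpoint in the cover.
demo_graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
cover = greedy_min_vertex_cover(demo_graph)
original = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
assert all(u in cover or v in cover for u, nbrs in original.items() for v in nbrs)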
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution():
    '''simple docstring'''
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    words_file_path = os.path.join(script_dir , 'words.txt' )
    words = ''
    with open(words_file_path ) as f:
        words = f.readline()
    words = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
    words = [
        word
        for word in [sum(ord(x ) - 64 for x in word ) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words )
if __name__ == "__main__":
print(solution())
| 29 | 0 |
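# Membership in TRIANGULAR_NUMBERS can also be tested in O(1): t is triangular
# iff 8*t + 1 is a perfect square. A small sketch of that equivalence.
from math import isqrt
def is_triangular(t: int) -> bool:
    s = 8 * t + 1
    return isqrt(s) ** 2 == s
assert all(is_triangular(t) for t in TRIANGULAR_NUMBERS)
assert not is_triangular(2)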
'''simple docstring'''
def solution(n: int = 600851475143 ) -> int:
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError("""Parameter n must be int or castable to int.""" )
    if n <= 0:
        raise ValueError("""Parameter n must be greater than or equal to one.""" )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
if __name__ == "__main__":
print(F"{solution() = }")
| 276 |
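# Spot checks for the trial-division solution above: 13195 = 5 * 7 * 13 * 29,
# a prime input is its own largest prime factor, and the default reproduces
# the classic Project Euler answer.
assert solution(13195) == 29
assert solution(17) == 17
assert solution(600851475143) == 6857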
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec('s3fs') is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str ):
    '''simple docstring'''
    if "://" in dataset_path:
        dataset_path = dataset_path.split('://' )[1]
    return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem ):
    '''simple docstring'''
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename(fs: fsspec.AbstractFileSystem , src: str , dst: str ):
    '''simple docstring'''
    is_local = not is_remote_filesystem(fs )
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src ) , fs._strip_protocol(dst ) )
    else:
        fs.mv(src , dst , recursive=True )
def _reset_fsspec_lock() -> None:
    '''simple docstring'''
    if hasattr(fsspec.asyn , 'reset_lock' ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
| 29 | 0 |
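# A standalone sketch of the protocol-stripping idea in extract_path_from_uri
# above (illustrative, not the datasets implementation): a remote path like
# "s3://bucket/ds" reduces to "bucket/ds"; a plain local path is unchanged.
def strip_protocol(dataset_path: str) -> str:
    return dataset_path.split('://')[1] if '://' in dataset_path else dataset_path
assert strip_protocol('s3://bucket/ds') == 'bucket/ds'
assert strip_protocol('/tmp/ds') == '/tmp/ds'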
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# TODO: upload to AWS
lowerCAmelCase__ = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class RetriBertConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = '''retribert'''
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=8 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-1_2 , share_encoders=True , projection_dim=1_2_8 , pad_token_id=0 , **kwargs ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 104 |
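# The config above follows the standard "store every hyperparameter as an
# attribute" PretrainedConfig pattern; a dependency-free sketch of the same
# idea (class and defaults illustrative):
class TinyConfig:
    def __init__(self, vocab_size=30522, hidden_size=768, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
cfg = TinyConfig(hidden_size=256)
assert cfg.hidden_size == 256 and cfg.vocab_size == 30522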
def cocktail_shaker_sort(unsorted: list ):
    '''simple docstring'''
    for i in range(len(unsorted ) - 1 , 0 , -1 ):
        swapped = False
        for j in range(i , 0 , -1 ):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j - 1] , unsorted[j] = unsorted[j] , unsorted[j - 1]
                swapped = True
        for j in range(i ):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j] , unsorted[j + 1] = unsorted[j + 1] , unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = input('Enter numbers separated by a comma:\n').strip()
__UpperCAmelCase = [int(item) for item in user_input.split(',')]
print(F'{cocktail_shaker_sort(unsorted) = }')
| 29 | 0 |
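# Sanity checks for the bidirectional bubble pass above: the sort is in-place
# and also returns the list, and the swapped flag exits early on sorted input.
assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]
assert cocktail_shaker_sort([]) == []
assert cocktail_shaker_sort([-4, 0, 3]) == [-4, 0, 3]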
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class _snake_case :
def __init__( self ) -> Optional[int]:
'''simple docstring'''
        self.graph = {}
def lowerCAmelCase__ ( self , a__ , a__ , a__=1 ) -> List[Any]:
'''simple docstring'''
if self.graph.get(_UpperCamelCase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
snake_case_ = [[w, v]]
if not self.graph.get(_UpperCamelCase ):
snake_case_ = []
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return list(self.graph )
def lowerCAmelCase__ ( self , a__ , a__ ) -> int:
'''simple docstring'''
if self.graph.get(_UpperCamelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_UpperCamelCase )
def lowerCAmelCase__ ( self , a__=-2 , a__=-1 ) -> Union[str, Any]:
'''simple docstring'''
if s == d:
return []
snake_case_ = []
snake_case_ = []
if s == -2:
snake_case_ = list(self.graph )[0]
stack.append(_UpperCamelCase )
visited.append(_UpperCamelCase )
snake_case_ = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_UpperCamelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_UpperCamelCase ) != 0:
snake_case_ = stack[len(_UpperCamelCase ) - 1]
else:
snake_case_ = ss
            # check if we have reached the starting point
if len(_UpperCamelCase ) == 0:
return visited
def lowerCAmelCase__ ( self , a__=-1 ) -> Union[str, Any]:
'''simple docstring'''
if c == -1:
snake_case_ = floor(random() * 10_000 ) + 10
for i in range(_UpperCamelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case_ = floor(random() * c ) + 1
if n != i:
self.add_pair(_UpperCamelCase , _UpperCamelCase , 1 )
def lowerCAmelCase__ ( self , a__=-2 ) -> Tuple:
'''simple docstring'''
snake_case_ = deque()
snake_case_ = []
if s == -2:
snake_case_ = list(self.graph )[0]
d.append(_UpperCamelCase )
visited.append(_UpperCamelCase )
while d:
snake_case_ = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowerCAmelCase__ ( self , a__ ) -> Any:
'''simple docstring'''
snake_case_ = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def lowerCAmelCase__ ( self , a__ ) -> Dict:
'''simple docstring'''
return len(self.graph[u] )
def lowerCAmelCase__ ( self , a__=-2 ) -> Optional[int]:
'''simple docstring'''
snake_case_ = []
snake_case_ = []
if s == -2:
snake_case_ = list(self.graph )[0]
stack.append(_UpperCamelCase )
visited.append(_UpperCamelCase )
snake_case_ = s
snake_case_ = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(_UpperCamelCase ) != 0:
snake_case_ = stack[len(_UpperCamelCase ) - 1]
else:
snake_case_ = ss
            # check if we have reached the starting point
if len(_UpperCamelCase ) == 0:
return sorted_nodes
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ = []
snake_case_ = []
snake_case_ = list(self.graph )[0]
stack.append(_UpperCamelCase )
visited.append(_UpperCamelCase )
snake_case_ = -2
snake_case_ = []
snake_case_ = s
snake_case_ = False
snake_case_ = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ = len(_UpperCamelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ = True
if len(_UpperCamelCase ) != 0:
snake_case_ = stack[len(_UpperCamelCase ) - 1]
else:
snake_case_ = False
indirect_parents.append(_UpperCamelCase )
snake_case_ = s
snake_case_ = ss
            # check if we have reached the starting point
if len(_UpperCamelCase ) == 0:
return list(_UpperCamelCase )
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ = []
snake_case_ = []
snake_case_ = list(self.graph )[0]
stack.append(_UpperCamelCase )
visited.append(_UpperCamelCase )
snake_case_ = -2
snake_case_ = []
snake_case_ = s
snake_case_ = False
snake_case_ = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ = len(_UpperCamelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ = True
if len(_UpperCamelCase ) != 0:
snake_case_ = stack[len(_UpperCamelCase ) - 1]
else:
snake_case_ = False
indirect_parents.append(_UpperCamelCase )
snake_case_ = s
snake_case_ = ss
            # check if we have reached the starting point
if len(_UpperCamelCase ) == 0:
return False
def lowerCAmelCase__ ( self , a__=-2 , a__=-1 ) -> Tuple:
'''simple docstring'''
snake_case_ = time()
self.dfs(_UpperCamelCase , _UpperCamelCase )
snake_case_ = time()
return end - begin
def lowerCAmelCase__ ( self , a__=-2 ) -> int:
'''simple docstring'''
snake_case_ = time()
self.bfs(_UpperCamelCase )
snake_case_ = time()
return end - begin
class _snake_case :
def __init__( self ) -> str:
'''simple docstring'''
        self.graph = {}
def lowerCAmelCase__ ( self , a__ , a__ , a__=1 ) -> Any:
'''simple docstring'''
if self.graph.get(_UpperCamelCase ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
snake_case_ = [[w, v]]
# add the other way
if self.graph.get(_UpperCamelCase ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
snake_case_ = [[w, u]]
def lowerCAmelCase__ ( self , a__ , a__ ) -> Union[str, Any]:
'''simple docstring'''
if self.graph.get(_UpperCamelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_UpperCamelCase )
# the other way round
if self.graph.get(_UpperCamelCase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(_UpperCamelCase )
def lowerCAmelCase__ ( self , a__=-2 , a__=-1 ) -> List[str]:
'''simple docstring'''
if s == d:
return []
snake_case_ = []
snake_case_ = []
if s == -2:
snake_case_ = list(self.graph )[0]
stack.append(_UpperCamelCase )
visited.append(_UpperCamelCase )
snake_case_ = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_UpperCamelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_UpperCamelCase ) != 0:
snake_case_ = stack[len(_UpperCamelCase ) - 1]
else:
snake_case_ = ss
            # check if we have reached the starting point
if len(_UpperCamelCase ) == 0:
return visited
def lowerCAmelCase__ ( self , a__=-1 ) -> Any:
'''simple docstring'''
if c == -1:
snake_case_ = floor(random() * 10_000 ) + 10
for i in range(_UpperCamelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case_ = floor(random() * c ) + 1
if n != i:
self.add_pair(_UpperCamelCase , _UpperCamelCase , 1 )
def lowerCAmelCase__ ( self , a__=-2 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = deque()
snake_case_ = []
if s == -2:
snake_case_ = list(self.graph )[0]
d.append(_UpperCamelCase )
visited.append(_UpperCamelCase )
while d:
snake_case_ = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowerCAmelCase__ ( self , a__ ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ = []
snake_case_ = []
snake_case_ = list(self.graph )[0]
stack.append(_UpperCamelCase )
visited.append(_UpperCamelCase )
snake_case_ = -2
snake_case_ = []
snake_case_ = s
snake_case_ = False
snake_case_ = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ = len(_UpperCamelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ = True
if len(_UpperCamelCase ) != 0:
snake_case_ = stack[len(_UpperCamelCase ) - 1]
else:
snake_case_ = False
indirect_parents.append(_UpperCamelCase )
snake_case_ = s
snake_case_ = ss
            # check if we have reached the starting point
if len(_UpperCamelCase ) == 0:
return list(_UpperCamelCase )
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = []
snake_case_ = []
snake_case_ = list(self.graph )[0]
stack.append(_UpperCamelCase )
visited.append(_UpperCamelCase )
snake_case_ = -2
snake_case_ = []
snake_case_ = s
snake_case_ = False
snake_case_ = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ = len(_UpperCamelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ = True
if len(_UpperCamelCase ) != 0:
snake_case_ = stack[len(_UpperCamelCase ) - 1]
else:
snake_case_ = False
indirect_parents.append(_UpperCamelCase )
snake_case_ = s
snake_case_ = ss
            # check if we have reached the starting point
if len(_UpperCamelCase ) == 0:
return False
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
return list(self.graph )
def lowerCAmelCase__ ( self , a__=-2 , a__=-1 ) -> Any:
'''simple docstring'''
snake_case_ = time()
self.dfs(_UpperCamelCase , _UpperCamelCase )
snake_case_ = time()
return end - begin
def lowerCAmelCase__ ( self , a__=-2 ) -> Tuple:
'''simple docstring'''
snake_case_ = time()
self.bfs(_UpperCamelCase )
snake_case_ = time()
return end - begin
| 85 |
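# A compact, dependency-free sketch of the explicit-stack DFS pattern the
# Graph classes above implement (names illustrative):
def dfs(graph: dict, start):
    visited, stack = [], [start]
    while stack:
        node = stack.pop()
        if node not in visited:
            visited.append(node)
            stack.extend(n for n in reversed(graph.get(node, [])) if n not in visited)
    return visited
assert dfs({0: [1, 2], 1: [3], 2: [], 3: []}, 0) == [0, 1, 3, 2]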
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def lowercase__ ( __snake_case : List[str] , __snake_case : int , __snake_case : Tuple=8 ):
'''simple docstring'''
UpperCAmelCase_ : Dict = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase_ : List[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
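# e.g. height == width == 512 with scale_factor=8: 512 // 64 == 8 with no remainder, so this returns (64, 64).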
def lowercase__ ( __snake_case : Any , __snake_case : int=512 , __snake_case : Dict=512 ):
'''simple docstring'''
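    # Resize to (w, h), convert to RGB, rescale pixels from [0, 255] to [-1, 1], and add a batch dimension.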
UpperCAmelCase_ : Tuple = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
UpperCAmelCase_ : Dict = np.array(pil_image.convert('RGB' ) )
UpperCAmelCase_ : Any = arr.astype(np.floataa ) / 127.5 - 1
UpperCAmelCase_ : Dict = np.transpose(__snake_case , [2, 0, 1] )
UpperCAmelCase_ : List[str] = torch.from_numpy(__snake_case ).unsqueeze(0 )
return image
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Union[str, Any]:
super().__init__()
self.register_modules(
unet=_UpperCamelCase , scheduler=_UpperCamelCase , movq=_UpperCamelCase , )
UpperCAmelCase_ : Tuple = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
# get the original timestep using init_timestep
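        # e.g. num_inference_steps=100 with strength=0.3 gives init_timestep=30 and t_start=70, so 30 denoising steps run.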
UpperCAmelCase_ : Any = min(int(num_inference_steps * strength ) , _UpperCamelCase )
UpperCAmelCase_ : List[Any] = max(num_inference_steps - init_timestep , 0 )
UpperCAmelCase_ : str = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ) -> Tuple:
if not isinstance(_UpperCamelCase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_UpperCamelCase )}" )
UpperCAmelCase_ : List[str] = image.to(device=_UpperCamelCase , dtype=_UpperCamelCase )
UpperCAmelCase_ : List[str] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
UpperCAmelCase_ : List[str] = image
else:
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(_UpperCamelCase )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Any = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCamelCase )
]
UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase , dim=0 )
else:
UpperCAmelCase_ : Union[str, Any] = self.movq.encode(_UpperCamelCase ).latent_dist.sample(_UpperCamelCase )
UpperCAmelCase_ : int = self.movq.config.scaling_factor * init_latents
UpperCAmelCase_ : Optional[int] = torch.cat([init_latents] , dim=0 )
UpperCAmelCase_ : Tuple = init_latents.shape
UpperCAmelCase_ : List[Any] = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
# get latents
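        # Noise the clean image latents up to the starting timestep; `strength` controls how much of the input image survives.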
UpperCAmelCase_ : str = self.scheduler.add_noise(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = init_latents
return latents
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Any:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase_ : Optional[Any] = torch.device(f"cuda:{gpu_id}" )
UpperCAmelCase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
UpperCAmelCase_ : str = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_UpperCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase_ : Dict = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = cpu_offload_with_hook(_UpperCamelCase , _UpperCamelCase , prev_module_hook=_UpperCamelCase )
# We'll offload the last model manually.
UpperCAmelCase_ : Any = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self ) -> Dict:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_UpperCamelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 1_0_0 , _UpperCamelCase = 4.0 , _UpperCamelCase = 0.3 , _UpperCamelCase = 1 , _UpperCamelCase = None , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> str:
UpperCAmelCase_ : Any = self._execution_device
UpperCAmelCase_ : Union[str, Any] = guidance_scale > 1.0
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : str = torch.cat(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Optional[Any] = image_embeds.shape[0]
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : int = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : int = negative_image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_UpperCamelCase )
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Tuple = [image]
if not all(isinstance(_UpperCamelCase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"Input is in incorrect format: {[type(_UpperCamelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
UpperCAmelCase_ : str = torch.cat([prepare_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) for i in image] , dim=0 )
UpperCAmelCase_ : Any = image.to(dtype=image_embeds.dtype , device=_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.movq.encode(_UpperCamelCase )['latents']
UpperCAmelCase_ : List[Any] = latents.repeat_interleave(_UpperCamelCase , dim=0 )
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.get_timesteps(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
UpperCAmelCase_ , UpperCAmelCase_ : str = downscale_height_and_width(_UpperCamelCase , _UpperCamelCase , self.movq_scale_factor )
UpperCAmelCase_ : Dict = self.prepare_latents(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : str = {'image_embeds': image_embeds}
UpperCAmelCase_ : Union[str, Any] = self.unet(
sample=_UpperCamelCase , timestep=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , added_cond_kwargs=_UpperCamelCase , return_dict=_UpperCamelCase , )[0]
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 )
UpperCAmelCase_ , UpperCAmelCase_ : str = variance_pred.chunk(2 )
UpperCAmelCase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
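            # Schedulers without learned variance expect only the noise prediction, so drop the variance channels.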
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase_ , UpperCAmelCase_ : int = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : List[str] = self.scheduler.step(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase , )[0]
# post-processing
UpperCAmelCase_ : Optional[Any] = self.movq.decode(_UpperCamelCase , force_not_quantize=_UpperCamelCase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
UpperCAmelCase_ : List[str] = image * 0.5 + 0.5
UpperCAmelCase_ : List[Any] = image.clamp(0 , 1 )
UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase_ : List[Any] = self.numpy_to_pil(_UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCamelCase )
| 29 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
_UpperCAmelCase : Union[str, Any] = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase ) ) ) )
_UpperCAmelCase : Optional[Any] = {
'unk_token': '<unk>',
'bos_token': '<s>',
'eos_token': '</s>',
}
_UpperCAmelCase : str = {
'feature_size': 1,
'padding_value': 0.0,
'sampling_rate': 1_6_0_0_0,
'return_attention_mask': False,
'do_normalize': True,
}
_UpperCAmelCase : List[str] = tempfile.mkdtemp()
_UpperCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCAmelCase : int = os.path.join(self.tmpdirname , _UpperCamelCase )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_UpperCamelCase ) + "\n" )
with open(self.feature_extraction_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_UpperCamelCase ) + "\n" )
# load decoder from hub
_UpperCAmelCase : List[Any] = 'hf-internal-testing/ngram-beam-search-decoder'
def _lowerCAmelCase ( self : int , **lowerCAmelCase__ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(_UpperCamelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def _lowerCAmelCase ( self : Tuple , **lowerCAmelCase__ : int ) -> List[Any]:
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def _lowerCAmelCase ( self : Any , **lowerCAmelCase__ : List[str] ) -> int:
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_UpperCamelCase )
def _lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : List[str] = self.get_feature_extractor()
_UpperCAmelCase : Optional[Any] = self.get_decoder()
_UpperCAmelCase : List[str] = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Any = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _UpperCamelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _UpperCamelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _UpperCamelCase )
def _lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
        # make sure additional decoder kwargs are applied when loading from pretrained
_UpperCAmelCase : Dict = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def _lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : List[Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["xx"] )
with self.assertRaisesRegex(_UpperCamelCase , "include" ):
WavaVecaProcessorWithLM(
tokenizer=_UpperCamelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def _lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = self.get_feature_extractor()
_UpperCAmelCase : Any = self.get_tokenizer()
_UpperCAmelCase : Optional[Any] = self.get_decoder()
_UpperCAmelCase : List[Any] = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
_UpperCAmelCase : Tuple = floats_list((3, 1_0_0_0) )
_UpperCAmelCase : List[str] = feature_extractor(_UpperCamelCase , return_tensors="np" )
_UpperCAmelCase : Any = processor(_UpperCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = self.get_feature_extractor()
_UpperCAmelCase : Optional[int] = self.get_tokenizer()
_UpperCAmelCase : List[Any] = self.get_decoder()
_UpperCAmelCase : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
_UpperCAmelCase : List[Any] = 'This is a test string'
_UpperCAmelCase : str = processor(text=_UpperCamelCase )
_UpperCAmelCase : List[str] = tokenizer(_UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase__ : Any=(2, 1_0, 1_6) , lowerCAmelCase__ : int=7_7 ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(_UpperCamelCase )
return np.random.rand(*_UpperCamelCase )
def _lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : int = self.get_feature_extractor()
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : Optional[int] = self.get_decoder()
_UpperCAmelCase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
_UpperCAmelCase : int = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 )
_UpperCAmelCase : Union[str, Any] = processor.decode(_UpperCamelCase )
_UpperCAmelCase : List[Any] = decoder.decode_beams(_UpperCamelCase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("</s> <s> </s>" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["fork"], ["spawn"]] )
def _lowerCAmelCase ( self : str , lowerCAmelCase__ : Dict ) -> str:
"""simple docstring"""
_UpperCAmelCase : List[Any] = self.get_feature_extractor()
_UpperCAmelCase : str = self.get_tokenizer()
_UpperCAmelCase : Tuple = self.get_decoder()
_UpperCAmelCase : Dict = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
_UpperCAmelCase : int = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_UpperCAmelCase : Union[str, Any] = processor.batch_decode(_UpperCamelCase )
else:
with get_context(_UpperCamelCase ).Pool() as pool:
_UpperCAmelCase : Union[str, Any] = processor.batch_decode(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase : List[Any] = list(_UpperCamelCase )
with get_context("fork" ).Pool() as p:
_UpperCAmelCase : Dict = decoder.decode_beams_batch(_UpperCamelCase , _UpperCamelCase )
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_UpperCamelCase , decoded_processor.text )
self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text )
self.assertListEqual(_UpperCamelCase , decoded_processor.logit_score )
self.assertListEqual(_UpperCamelCase , decoded_processor.lm_score )
def _lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = self.get_feature_extractor()
_UpperCAmelCase : Optional[Any] = self.get_tokenizer()
_UpperCAmelCase : Any = self.get_decoder()
_UpperCAmelCase : int = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
_UpperCAmelCase : Any = self._get_dummy_logits()
_UpperCAmelCase : List[str] = 1_5
_UpperCAmelCase : str = -20.0
_UpperCAmelCase : List[Any] = -4.0
_UpperCAmelCase : Dict = processor.batch_decode(
_UpperCamelCase , beam_width=_UpperCamelCase , beam_prune_logp=_UpperCamelCase , token_min_logp=_UpperCamelCase , )
_UpperCAmelCase : List[str] = decoded_processor_out.text
_UpperCAmelCase : Dict = list(_UpperCamelCase )
with get_context("fork" ).Pool() as pool:
_UpperCAmelCase : Optional[int] = decoder.decode_beams_batch(
_UpperCamelCase , _UpperCamelCase , beam_width=_UpperCamelCase , beam_prune_logp=_UpperCamelCase , token_min_logp=_UpperCamelCase , )
_UpperCAmelCase : Dict = [d[0][0] for d in decoded_decoder_out]
_UpperCAmelCase : Union[str, Any] = [d[0][2] for d in decoded_decoder_out]
_UpperCAmelCase : str = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , _UpperCamelCase )
self.assertTrue(np.array_equal(_UpperCamelCase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , _UpperCamelCase , atol=1e-3 ) )
self.assertTrue(np.array_equal(_UpperCamelCase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , _UpperCamelCase , atol=1e-3 ) )
def _lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : str = self.get_feature_extractor()
_UpperCAmelCase : Tuple = self.get_tokenizer()
_UpperCAmelCase : List[str] = self.get_decoder()
_UpperCAmelCase : Tuple = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
_UpperCAmelCase : Optional[Any] = self._get_dummy_logits()
_UpperCAmelCase : List[Any] = 2.0
_UpperCAmelCase : List[str] = 5.0
_UpperCAmelCase : Optional[Any] = -20.0
_UpperCAmelCase : Union[str, Any] = True
_UpperCAmelCase : Any = processor.batch_decode(
_UpperCamelCase , alpha=_UpperCamelCase , beta=_UpperCamelCase , unk_score_offset=_UpperCamelCase , lm_score_boundary=_UpperCamelCase , )
_UpperCAmelCase : Dict = decoded_processor_out.text
_UpperCAmelCase : Any = list(_UpperCamelCase )
decoder.reset_params(
alpha=_UpperCamelCase , beta=_UpperCamelCase , unk_score_offset=_UpperCamelCase , lm_score_boundary=_UpperCamelCase , )
with get_context("fork" ).Pool() as pool:
_UpperCAmelCase : Tuple = decoder.decode_beams_batch(
_UpperCamelCase , _UpperCamelCase , )
_UpperCAmelCase : List[Any] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , _UpperCamelCase )
_UpperCAmelCase : int = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _UpperCamelCase )
def _lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : Any = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
_UpperCAmelCase : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
_UpperCAmelCase : Union[str, Any] = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
_UpperCAmelCase : List[Any] = os.listdir(_UpperCamelCase )
_UpperCAmelCase : List[str] = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def _lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : List[Any] = snapshot_download("hf-internal-testing/processor_with_lm" )
_UpperCAmelCase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(_UpperCamelCase )
_UpperCAmelCase : Any = processor.decoder.model_container[processor.decoder._model_key]
_UpperCAmelCase : Dict = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
_UpperCAmelCase : int = os.listdir(_UpperCamelCase )
_UpperCAmelCase : Dict = os.listdir(_UpperCamelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder from the hub and the local files in the cache are the same
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def _lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Any = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
_UpperCAmelCase : Any = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" )
_UpperCAmelCase : Union[str, Any] = floats_list((3, 1_0_0_0) )
_UpperCAmelCase : Tuple = processor_wavaveca(_UpperCamelCase , return_tensors="np" )
_UpperCAmelCase : List[str] = processor_auto(_UpperCamelCase , return_tensors="np" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
_UpperCAmelCase : Optional[Any] = self._get_dummy_logits()
_UpperCAmelCase : List[Any] = processor_wavaveca.batch_decode(_UpperCamelCase )
_UpperCAmelCase : List[str] = processor_auto.batch_decode(_UpperCamelCase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def _lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
_UpperCAmelCase : str = self.get_feature_extractor()
_UpperCAmelCase : Any = self.get_tokenizer()
_UpperCAmelCase : List[str] = self.get_decoder()
_UpperCAmelCase : Dict = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
@staticmethod
def _lowerCAmelCase ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int ) -> str:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = [d[key] for d in offsets]
return retrieved_list
def _lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
_UpperCAmelCase : List[str] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
_UpperCAmelCase : Tuple = self._get_dummy_logits()[0]
_UpperCAmelCase : Union[str, Any] = processor.decode(_UpperCamelCase , output_word_offsets=_UpperCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(_UpperCamelCase , _UpperCamelCase ) )
self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] )
def _lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
_UpperCAmelCase : List[Any] = self._get_dummy_logits()
_UpperCAmelCase : Any = processor.batch_decode(_UpperCamelCase , output_word_offsets=_UpperCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(_UpperCamelCase , _UpperCamelCase ) )
self.assertListEqual(
[" ".join(self.get_from_offsets(_UpperCamelCase , "word" ) ) for o in outputs["word_offsets"]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
import torch
_UpperCAmelCase : Optional[Any] = load_dataset("common_voice" , "en" , split="train" , streaming=_UpperCamelCase )
_UpperCAmelCase : Dict = ds.cast_column("audio" , datasets.Audio(sampling_rate=1_6_0_0_0 ) )
_UpperCAmelCase : Any = iter(_UpperCamelCase )
_UpperCAmelCase : int = next(_UpperCamelCase )
_UpperCAmelCase : List[str] = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
_UpperCAmelCase : List[Any] = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_UpperCAmelCase : Dict = processor(sample["audio"]["array"] , return_tensors="pt" ).input_values
with torch.no_grad():
_UpperCAmelCase : Optional[int] = model(_UpperCamelCase ).logits.cpu().numpy()
_UpperCAmelCase : List[Any] = processor.decode(logits[0] , output_word_offsets=_UpperCamelCase )
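        # Each logit frame covers inputs_to_logits_ratio / sampling_rate seconds of audio.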
_UpperCAmelCase : List[str] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_UpperCAmelCase : Tuple = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
_UpperCAmelCase : List[str] = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(" ".join(self.get_from_offsets(_UpperCamelCase , "word" ) ) , _UpperCamelCase )
self.assertEqual(" ".join(self.get_from_offsets(_UpperCamelCase , "word" ) ) , output.text )
# output times
_UpperCAmelCase : Tuple = torch.tensor(self.get_from_offsets(_UpperCamelCase , "start_time" ) )
_UpperCAmelCase : List[Any] = torch.tensor(self.get_from_offsets(_UpperCamelCase , "end_time" ) )
# fmt: off
_UpperCAmelCase : Tuple = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
_UpperCAmelCase : Optional[int] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=0.01 ) )
self.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=0.01 ) ) | 145 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowercase__ ( __snake_case : List[Any] , __snake_case : List[str]=False ):
'''simple docstring'''
try:
UpperCAmelCase_ : int = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCAmelCase_ : Optional[int] = default
else:
# KEY is set, convert it to True or False.
try:
UpperCAmelCase_ : List[Any] = strtobool(__snake_case )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
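# Example: RUN_SLOW=yes (or "1"/"true") in the environment makes the flag parsed below truthy.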
__UpperCAmelCase = parse_flag_from_env('RUN_SLOW', default=False)
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
return unittest.skip('Test was skipped' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(__snake_case )
def lowercase__ ( __snake_case : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(__snake_case )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(__snake_case )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(__snake_case )
def lowercase__ ( __snake_case : Dict=None , __snake_case : Dict=None ):
'''simple docstring'''
if test_case is None:
return partial(__snake_case , version=__snake_case )
return unittest.skipUnless(is_torch_version('>=' , __snake_case ) , F"test requires torch version >= {version}" )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(__snake_case )
__UpperCAmelCase = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(__snake_case )
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
_snake_case : Union[str, Any] = True
@classmethod
def __UpperCAmelCase ( cls ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = tempfile.mkdtemp()
@classmethod
def __UpperCAmelCase ( cls ) -> List[str]:
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __UpperCAmelCase ( self ) -> str:
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('**/*' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(_UpperCamelCase )
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Optional[int]:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : List[Any] = mocks if isinstance(_UpperCamelCase , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
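    # Gather `tensor` from every process and compare each gathered copy against this process's copy.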
UpperCAmelCase_ : int = AcceleratorState()
UpperCAmelCase_ : str = tensor[None].clone().to(state.device )
UpperCAmelCase_ : List[str] = gather(__snake_case ).cpu()
UpperCAmelCase_ : List[Any] = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , __snake_case ):
return False
return True
class lowerCamelCase :
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : str = returncode
UpperCAmelCase_ : Optional[Any] = stdout
UpperCAmelCase_ : Optional[Any] = stderr
async def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Optional[int] ):
'''simple docstring'''
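    # Forward each line read from the subprocess stream to `callback` until EOF.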
while True:
UpperCAmelCase_ : Dict = await stream.readline()
if line:
callback(__snake_case )
else:
break
async def lowercase__ ( __snake_case : Optional[int] , __snake_case : Dict=None , __snake_case : str=None , __snake_case : Dict=None , __snake_case : List[str]=False , __snake_case : Optional[int]=False ):
'''simple docstring'''
if echo:
print('\nRunning: ' , ' '.join(__snake_case ) )
UpperCAmelCase_ : Optional[Any] = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__snake_case , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__snake_case , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : str = []
def tee(__snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Optional[int]="" ):
UpperCAmelCase_ : List[str] = line.decode('utf-8' ).rstrip()
sink.append(__snake_case )
if not quiet:
print(__snake_case , __snake_case , file=__snake_case )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda __snake_case : tee(__snake_case , __snake_case , sys.stdout , label='stdout:' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda __snake_case : tee(__snake_case , __snake_case , sys.stderr , label='stderr:' ) ) ),
] , timeout=__snake_case , )
return _RunOutput(await p.wait() , __snake_case , __snake_case )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[Any]=None , __snake_case : str=None , __snake_case : Tuple=180 , __snake_case : Dict=False , __snake_case : Optional[Any]=True ):
'''simple docstring'''
UpperCAmelCase_ : str = asyncio.get_event_loop()
UpperCAmelCase_ : int = loop.run_until_complete(
_stream_subprocess(__snake_case , env=__snake_case , stdin=__snake_case , timeout=__snake_case , quiet=__snake_case , echo=__snake_case ) )
UpperCAmelCase_ : int = ' '.join(__snake_case )
if result.returncode > 0:
UpperCAmelCase_ : int = '\n'.join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
return result
class lowerCamelCase (_snake_case ):
'''simple docstring'''
pass
def lowercase__ ( __snake_case : List[str] , __snake_case : List[Any]=False ):
'''simple docstring'''
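    # Run `command`, raising SubprocessCallException with the captured output if it fails.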
try:
UpperCAmelCase_ : List[Any] = subprocess.check_output(__snake_case , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(__snake_case , 'decode' ):
UpperCAmelCase_ : str = output.decode('utf-8' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"Command `{' '.join(__snake_case )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 29 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(_snake_case ) , '''Tatoeba directory does not exist.''' )
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case : Any = tempfile.mkdtemp()
return TatoebaConverter(save_dir=_UpperCamelCase )
@slow
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
self.resolver.convert_models(["heb-eng"] )
@slow
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
snake_case : Any = self.resolver.write_model_card("opus-mt-he-en" , dry_run=_UpperCamelCase )
assert mmeta["long_pair"] == "heb-eng"
| 203 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__UpperCAmelCase = logging.getLogger(__name__)
def lowercase__ ( __snake_case : List[Any]=2 , __snake_case : Union[str, Any]=3 , __snake_case : Any=16 , __snake_case : int = 10 , __snake_case : int = 2 ):
'''simple docstring'''
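    # Synthetic regression data: with the defaults (batch_size=16, n_batches=10) each split holds 160 (x, a*x + b + noise) pairs.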
def get_dataset(__snake_case : Optional[Any] ):
UpperCAmelCase_ : Optional[Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(__snake_case , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
UpperCAmelCase_ : Any = get_dataset(__snake_case )
UpperCAmelCase_ : str = get_dataset(__snake_case )
UpperCAmelCase_ : int = DataLoader(__snake_case , shuffle=__snake_case , batch_size=__snake_case , num_workers=4 )
UpperCAmelCase_ : int = DataLoader(__snake_case , shuffle=__snake_case , batch_size=__snake_case , num_workers=4 )
return (train_dataloader, valid_dataloader)
def lowercase__ ( __snake_case : Optional[int] , __snake_case : str , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Any , __snake_case : Tuple=None ):
'''simple docstring'''
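    # Train for the given number of epochs, recording one random.random() per epoch so RNG state can be compared after a checkpoint restore.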
UpperCAmelCase_ : Optional[int] = []
for epoch in range(__snake_case ):
# Train quickly
model.train()
for batch in dataloader:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = batch
UpperCAmelCase_ : List[Any] = model(__snake_case )
UpperCAmelCase_ : int = torch.nn.functional.mse_loss(__snake_case , __snake_case )
accelerator.backward(__snake_case )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class lowerCamelCase (nn.Module ):
'''simple docstring'''
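    # Scalar linear model y = a * x + b; its two parameters keep checkpoint comparisons trivial.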
def __init__( self ) -> Optional[Any]:
super().__init__()
UpperCAmelCase_ : List[Any] = nn.Parameter(torch.randn(1 ) )
UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.randn(1 ) )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[Any]:
return x * self.a + self.b
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase_ : Optional[int] = ProjectConfiguration(total_limit=1 , project_dir=_UpperCamelCase , automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : Dict = Accelerator(project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Optional[Any] = DummyModel()
UpperCAmelCase_ : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders()
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
UpperCAmelCase_ : Any = os.path.join(_UpperCamelCase , 'initial' )
accelerator.save_state(_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
UpperCAmelCase_ : Union[str, Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Union[str, Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Any = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : str = dummy_dataloaders()
UpperCAmelCase_ : Optional[Any] = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.load_state(_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[str] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Dict = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save everything
UpperCAmelCase_ : Union[str, Any] = os.path.join(_UpperCamelCase , 'checkpoint' )
accelerator.save_state(_UpperCamelCase )
# Load everything back in and make sure all states work
accelerator.load_state(_UpperCamelCase )
test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dummy_dataloaders()
UpperCAmelCase_ : Any = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : str = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
UpperCAmelCase_ : Optional[Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Tuple = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase_ : Any = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase_ : Tuple = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) )
((UpperCAmelCase_) , (UpperCAmelCase_)) : str = model.a.item(), model.b.item()
UpperCAmelCase_ : List[Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_1' ) )
test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : Optional[Any] = torch.tensor([1, 2, 3] )
UpperCAmelCase_ : Any = torch.tensor([2, 3, 4] )
UpperCAmelCase_ : Union[str, Any] = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(net.parameters() )
UpperCAmelCase_ : Any = Accelerator()
with self.assertRaises(_UpperCamelCase ) as ve:
accelerator.register_for_checkpointing(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[int] = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ : Dict = torch.optim.lr_scheduler.StepLR(_UpperCamelCase , step_size=1 , gamma=0.99 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders()
UpperCAmelCase_ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
UpperCAmelCase_ : Dict = scheduler.state_dict()
train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
self.assertNotEqual(_UpperCamelCase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) )
self.assertEqual(_UpperCamelCase , scheduler.state_dict() )
def __UpperCAmelCase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Optional[int] = DummyModel()
UpperCAmelCase_ : Dict = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase , total_limit=2 )
# Train baseline
UpperCAmelCase_ : Optional[int] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ : str = accelerator.prepare(_UpperCamelCase )
            # Save 11 states; with total_limit=2 only the last two checkpoints survive:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : List[str] = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
if __name__ == "__main__":
__UpperCAmelCase = '/tmp/accelerate/state_checkpointing'
__UpperCAmelCase = DummyModel()
__UpperCAmelCase = torch.optim.Adam(params=model.parameters(), lr=1E-3)
__UpperCAmelCase = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
__UpperCAmelCase , __UpperCAmelCase = dummy_dataloaders()
__UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__UpperCAmelCase = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
__UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert param_device.type == accelerator.device.type
__UpperCAmelCase = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 29 | 0 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = "Hello, World!"
__A = "en_XX"
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : bool ) -> Optional[int]:
"""simple docstring"""
__lowerCamelCase = Path('data_bin' )
__lowerCamelCase = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(__snake_case ).parent ) , checkpoint_file=Path(__snake_case ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(__snake_case ) , bpe='sentencepiece' , sentencepiece_model=str(Path(__snake_case ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , )
xmod.eval() # disable dropout
print(__snake_case )
__lowerCamelCase = xmod.model.encoder.sentence_encoder
__lowerCamelCase = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
__lowerCamelCase = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our X-MOD config:' , __snake_case )
__lowerCamelCase = XmodForSequenceClassification(__snake_case ) if classification_head else XmodForMaskedLM(__snake_case )
model.eval()
# Now let's copy all the weights.
# Embeddings
__lowerCamelCase = xmod_sent_encoder.embed_tokens.weight
__lowerCamelCase = xmod_sent_encoder.embed_positions.weight
__lowerCamelCase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
__lowerCamelCase = xmod_sent_encoder.layernorm_embedding.weight
__lowerCamelCase = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__lowerCamelCase = model.roberta.encoder.layer[i]
__lowerCamelCase = xmod_sent_encoder.layers[i]
# self attention
__lowerCamelCase = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
__lowerCamelCase = xmod_layer.self_attn.q_proj.weight
__lowerCamelCase = xmod_layer.self_attn.q_proj.bias
__lowerCamelCase = xmod_layer.self_attn.k_proj.weight
__lowerCamelCase = xmod_layer.self_attn.k_proj.bias
__lowerCamelCase = xmod_layer.self_attn.v_proj.weight
__lowerCamelCase = xmod_layer.self_attn.v_proj.bias
# self-attention output
__lowerCamelCase = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
__lowerCamelCase = xmod_layer.self_attn.out_proj.weight
__lowerCamelCase = xmod_layer.self_attn.out_proj.bias
__lowerCamelCase = xmod_layer.self_attn_layer_norm.weight
__lowerCamelCase = xmod_layer.self_attn_layer_norm.bias
# intermediate
__lowerCamelCase = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of intermediate weights do not match.' )
__lowerCamelCase = xmod_layer.fca.weight
__lowerCamelCase = xmod_layer.fca.bias
# output
__lowerCamelCase = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of feed-forward weights do not match.' )
__lowerCamelCase = xmod_layer.fca.weight
__lowerCamelCase = xmod_layer.fca.bias
__lowerCamelCase = xmod_layer.final_layer_norm.weight
__lowerCamelCase = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__lowerCamelCase = xmod_layer.adapter_layer_norm.weight
__lowerCamelCase = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
__lowerCamelCase = bert_output.adapter_modules[lang_code]
__lowerCamelCase = xmod_layer.adapter_modules[lang_code]
__lowerCamelCase = from_adapter.fca.weight
__lowerCamelCase = from_adapter.fca.bias
__lowerCamelCase = from_adapter.fca.weight
__lowerCamelCase = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__lowerCamelCase = xmod_sent_encoder.layer_norm.weight
__lowerCamelCase = xmod_sent_encoder.layer_norm.bias
if classification_head:
__lowerCamelCase = xmod.model.classification_heads['mnli'].dense.weight
__lowerCamelCase = xmod.model.classification_heads['mnli'].dense.bias
__lowerCamelCase = xmod.model.classification_heads['mnli'].out_proj.weight
__lowerCamelCase = xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
__lowerCamelCase = xmod.model.encoder.lm_head.dense.weight
__lowerCamelCase = xmod.model.encoder.lm_head.dense.bias
__lowerCamelCase = xmod.model.encoder.lm_head.layer_norm.weight
__lowerCamelCase = xmod.model.encoder.lm_head.layer_norm.bias
__lowerCamelCase = xmod.model.encoder.lm_head.weight
__lowerCamelCase = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
__lowerCamelCase = xmod.encode(__snake_case ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(__snake_case )
__lowerCamelCase = model(__snake_case )[0]
if classification_head:
__lowerCamelCase = xmod.model.classification_heads['mnli'](xmod.extract_features(__snake_case ) )
else:
__lowerCamelCase = xmod.model(__snake_case , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
__lowerCamelCase = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
__lowerCamelCase = torch.allclose(__snake_case , __snake_case , atol=1E-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
Path(__snake_case ).mkdir(parents=__snake_case , exist_ok=__snake_case )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__snake_case )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
__A = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 90 |
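# The output-equivalence check the conversion script ends with, factored out:
# run both models on the same input and compare via max-abs-diff plus allclose.
# `ours`/`theirs` are illustrative names, not bindings from the script above.
import torch
def outputs_match(ours: torch.Tensor, theirs: torch.Tensor, atol: float = 1e-3) -> bool:
    max_absolute_diff = torch.max(torch.abs(ours - theirs)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~1e-7 for a faithful conversion
    return torch.allclose(ours, theirs, atol=atol)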
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    '''simple docstring'''
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ImageGPTImageProcessor instead.' , FutureWarning , )
        super().__init__(*args, **kwargs)
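# A quick sanity check of the shim above (assuming the processor's no-argument
# constructor, as in recent transformers): building the deprecated class emits
# a FutureWarning yet yields a fully functional ImageGPTImageProcessor.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    extractor = ImageGPTFeatureExtractor()
assert issubclass(caught[-1].category, FutureWarning)
assert isinstance(extractor, ImageGPTImageProcessor)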
| 29 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( _snake_case , unittest.TestCase ):
_a = ConsistencyModelPipeline
_a = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
_a = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
_a = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'output_type',
'return_dict',
'callback',
'callback_steps',
] )
@property
def __lowercase ( self : Tuple ):
lowerCAmelCase = UNetaDModel.from_pretrained(
"""diffusers/consistency-models-test""" , subfolder="""test_unet""" , )
return unet
@property
def __lowercase ( self : Tuple ):
lowerCAmelCase = UNetaDModel.from_pretrained(
"""diffusers/consistency-models-test""" , subfolder="""test_unet_class_cond""" , )
return unet
def __lowercase ( self : Dict , lowerCAmelCase : Optional[int]=False ):
if class_cond:
lowerCAmelCase = self.dummy_cond_unet
else:
lowerCAmelCase = self.dummy_uncond_unet
# Default to CM multistep sampler
lowerCAmelCase = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
lowerCAmelCase = {
'unet': unet,
'scheduler': scheduler,
}
return components
def __lowercase ( self : Any , lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any]=0 ):
if str(_UpperCamelCase ).startswith("""mps""" ):
lowerCAmelCase = torch.manual_seed(_UpperCamelCase )
else:
lowerCAmelCase = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
lowerCAmelCase = {
'batch_size': 1,
'num_inference_steps': None,
'timesteps': [22, 0],
'generator': generator,
'output_type': 'np',
}
return inputs
def __lowercase ( self : Optional[int] ):
lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = ConsistencyModelPipeline(**_UpperCamelCase )
lowerCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
lowerCAmelCase = self.get_dummy_inputs(_UpperCamelCase )
lowerCAmelCase = pipe(**_UpperCamelCase ).images
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __lowercase ( self : Union[str, Any] ):
lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components(class_cond=_UpperCamelCase )
lowerCAmelCase = ConsistencyModelPipeline(**_UpperCamelCase )
lowerCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
lowerCAmelCase = self.get_dummy_inputs(_UpperCamelCase )
lowerCAmelCase = 0
lowerCAmelCase = pipe(**_UpperCamelCase ).images
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __lowercase ( self : Optional[Any] ):
lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = ConsistencyModelPipeline(**_UpperCamelCase )
lowerCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
lowerCAmelCase = self.get_dummy_inputs(_UpperCamelCase )
lowerCAmelCase = 1
lowerCAmelCase = None
lowerCAmelCase = pipe(**_UpperCamelCase ).images
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __lowercase ( self : Any ):
lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components(class_cond=_UpperCamelCase )
lowerCAmelCase = ConsistencyModelPipeline(**_UpperCamelCase )
lowerCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
lowerCAmelCase = self.get_dummy_inputs(_UpperCamelCase )
lowerCAmelCase = 1
lowerCAmelCase = None
lowerCAmelCase = 0
lowerCAmelCase = pipe(**_UpperCamelCase ).images
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowercase ( self : int ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : int , lowerCAmelCase : Dict=0 , lowerCAmelCase : Optional[Any]=False , lowerCAmelCase : Optional[int]="cpu" , lowerCAmelCase : Optional[int]=torch.floataa , lowerCAmelCase : List[Any]=(1, 3, 64, 64) ):
lowerCAmelCase = torch.manual_seed(_UpperCamelCase )
lowerCAmelCase = {
'num_inference_steps': None,
'timesteps': [22, 0],
'class_labels': 0,
'generator': generator,
'output_type': 'np',
}
if get_fixed_latents:
lowerCAmelCase = self.get_fixed_latents(seed=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase , shape=_UpperCamelCase )
lowerCAmelCase = latents
return inputs
def __lowercase ( self : Any , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : List[Any]="cpu" , lowerCAmelCase : Tuple=torch.floataa , lowerCAmelCase : Any=(1, 3, 64, 64) ):
if type(_UpperCamelCase ) == str:
lowerCAmelCase = torch.device(_UpperCamelCase )
lowerCAmelCase = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
lowerCAmelCase = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
return latents
def __lowercase ( self : Union[str, Any] ):
lowerCAmelCase = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
lowerCAmelCase = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
lowerCAmelCase = ConsistencyModelPipeline(unet=_UpperCamelCase , scheduler=_UpperCamelCase )
pipe.to(torch_device=_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
lowerCAmelCase = self.get_inputs()
lowerCAmelCase = pipe(**_UpperCamelCase ).images
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def __lowercase ( self : Any ):
lowerCAmelCase = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
lowerCAmelCase = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
lowerCAmelCase = ConsistencyModelPipeline(unet=_UpperCamelCase , scheduler=_UpperCamelCase )
pipe.to(torch_device=_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
lowerCAmelCase = self.get_inputs()
lowerCAmelCase = 1
lowerCAmelCase = None
lowerCAmelCase = pipe(**_UpperCamelCase ).images
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
@require_torch_a
def __lowercase ( self : str ):
lowerCAmelCase = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
lowerCAmelCase = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
lowerCAmelCase = ConsistencyModelPipeline(unet=_UpperCamelCase , scheduler=_UpperCamelCase )
pipe.to(torch_device=_UpperCamelCase , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
lowerCAmelCase = self.get_inputs(get_fixed_latents=_UpperCamelCase , device=_UpperCamelCase )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=_UpperCamelCase , enable_math=_UpperCamelCase , enable_mem_efficient=_UpperCamelCase ):
lowerCAmelCase = pipe(**_UpperCamelCase ).images
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@require_torch_a
def __lowercase ( self : List[Any] ):
lowerCAmelCase = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
lowerCAmelCase = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
lowerCAmelCase = ConsistencyModelPipeline(unet=_UpperCamelCase , scheduler=_UpperCamelCase )
pipe.to(torch_device=_UpperCamelCase , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
lowerCAmelCase = self.get_inputs(get_fixed_latents=_UpperCamelCase , device=_UpperCamelCase )
lowerCAmelCase = 1
lowerCAmelCase = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=_UpperCamelCase , enable_math=_UpperCamelCase , enable_mem_efficient=_UpperCamelCase ):
lowerCAmelCase = pipe(**_UpperCamelCase ).images
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 155 |
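# The slice-comparison idiom the pipeline tests above repeat, factored out:
# compare a small fixed corner of the generated image against stored reference
# values instead of the full array.
import numpy as np
def assert_image_slice_close(image: np.ndarray, expected_slice: np.ndarray, tol: float = 1e-3) -> None:
    image_slice = image[0, -3:, -3:, -1]  # 3x3 corner of the last channel
    assert np.abs(image_slice.flatten() - expected_slice).max() < tol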
def is_palindrome( head ):
    '''simple docstring'''
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack( head ):
    '''simple docstring'''
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict( head ):
    '''simple docstring'''
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0 , len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
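# The three checks above assume a singly linked node with `val`/`next`
# attributes that this file never defines; a minimal node and builder for
# trying them out by hand (illustrative names, not part of the original).
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next
def build_list(values):
    head = None
    for v in reversed(values):
        head = ListNode(v, head)
    return head
assert is_palindrome(build_list([1, 2, 2, 1]))
assert is_palindrome_stack(build_list([1, 2, 1]))
assert not is_palindrome_dict(build_list([1, 2, 3]))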
| 29 | 0 |
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class lowerCAmelCase_ ( _snake_case ):
'''simple docstring'''
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
A = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
A = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
A = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
A = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_UpperCamelCase )
BertModel.from_pretrained(_UpperCamelCase )
BertTokenizer.from_pretrained(_UpperCamelCase )
pipeline(task='fill-mask' ,model=_UpperCamelCase )
# baseline - just load from_pretrained with normal network
A = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
A = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
A = '1'
A = subprocess.run(_UpperCamelCase ,env=_UpperCamelCase ,check=_UpperCamelCase ,capture_output=_UpperCamelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
A = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
A = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
A = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
A = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_UpperCamelCase )
BertModel.from_pretrained(_UpperCamelCase )
BertTokenizer.from_pretrained(_UpperCamelCase )
pipeline(task='fill-mask' ,model=_UpperCamelCase )
# baseline - just load from_pretrained with normal network
A = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
A = self.get_env()
A = subprocess.run(_UpperCamelCase ,env=_UpperCamelCase ,check=_UpperCamelCase ,capture_output=_UpperCamelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
A = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
A = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
A = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
A = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
A = self.get_env()
A = subprocess.run(_UpperCamelCase ,env=_UpperCamelCase ,check=_UpperCamelCase ,capture_output=_UpperCamelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# next emulate no network
A = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
A = '1'
A = subprocess.run(_UpperCamelCase ,env=_UpperCamelCase ,check=_UpperCamelCase ,capture_output=_UpperCamelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
A = '\nfrom transformers import pipeline\n '
A = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
A = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
A = self.get_env()
A = '1'
A = [sys.executable, '-c', '\n'.join([load, mock, run] )]
A = subprocess.run(_UpperCamelCase ,env=_UpperCamelCase ,check=_UpperCamelCase ,capture_output=_UpperCamelCase )
self.assertEqual(result.returncode ,1 ,result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' ,result.stderr.decode().replace('\n' ,'' ) ,)
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
A = '\nfrom transformers import AutoModel\n '
A = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
A = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
A = self.get_env()
A = subprocess.run(_UpperCamelCase ,env=_UpperCamelCase ,check=_UpperCamelCase ,capture_output=_UpperCamelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
A = '1'
A = subprocess.run(_UpperCamelCase ,env=_UpperCamelCase ,check=_UpperCamelCase ,capture_output=_UpperCamelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
        self.assertIn('success' ,result.stdout.decode() )
| 74 |
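# The "no network" fault injection these tests splice into their subprocesses,
# shown standalone: monkey-patch socket.socket so any connection attempt fails
# immediately instead of timing out.
import socket
def offline_socket(*args, **kwargs):
    raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket  # every subsequent network call now raises at once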
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
        'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ViTMSNModel',
        'ViTMSNForImageClassification',
        'ViTMSNPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29 | 0 |
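# What _LazyModule implements under the hood, sketched with a PEP 562
# module-level __getattr__ over a mapping like the _import_structure above.
# This is an illustrative reimplementation for a package __init__.py, not
# transformers' actual class.
import importlib
def __getattr__(name):
    for submodule, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")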
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
ETAOIN = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letter_count( message: str ) -> dict[str, int]:
    """simple docstring"""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero( x: tuple ) -> str:
    """simple docstring"""
    return x[0]
def get_frequency_order( message: str ) -> str:
    """simple docstring"""
    letter_to_freq = get_letter_count(message )
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find , reverse=True )
        freq_to_letter_str[freq] = ''.join(freq_to_letter[freq] )
    freq_pairs = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=get_item_at_index_zero , reverse=True )
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order )
def english_freq_match_score( message: str ) -> int:
    """simple docstring"""
    freq_order = get_frequency_order(message )
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
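# A quick usage sketch of the helpers above: scores close to the maximum of 12
# indicate English-like letter statistics.
sample_text = "To be, or not to be, that is the question."
print(get_frequency_order(sample_text))       # all 26 letters, most frequent first
print(english_freq_match_score(sample_text))  # overlap with ETAOIN's top/bottom six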
if __name__ == "__main__":
import doctest
doctest.testmod()
| 142 |
__UpperCAmelCase = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 29 | 0 |
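# How a pin table like the one above is consumed (transformers' setup.py does
# essentially this): resolve bare package names to full version specifiers when
# assembling install_requires / extras. `deps` stands in for the masked dict
# name above.
deps = {'torch': 'torch>=1.9,!=1.12.0', 'numpy': 'numpy>=1.17'}
def deps_list(*pkgs):
    return [deps[pkg] for pkg in pkgs]
assert deps_list('torch', 'numpy') == ['torch>=1.9,!=1.12.0', 'numpy>=1.17']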
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {'''tokenization_herbert''': ['''HerbertTokenizer''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_herbert_fast'''] = ['''HerbertTokenizerFast''']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : "DiagonalGaussianDistribution"
class lowerCamelCase (_snake_case , _snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = True
@register_to_config
def __init__( self , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = ("DownEncoderBlock2D",) , _UpperCamelCase = ("UpDecoderBlock2D",) , _UpperCamelCase = (6_4,) , _UpperCamelCase = 1 , _UpperCamelCase = "silu" , _UpperCamelCase = 4 , _UpperCamelCase = 3_2 , _UpperCamelCase = 3_2 , _UpperCamelCase = 0.1_82_15 , ) -> List[Any]:
super().__init__()
# pass init params to Encoder
UpperCAmelCase_ : List[str] = Encoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , down_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , act_fn=_UpperCamelCase , norm_num_groups=_UpperCamelCase , double_z=_UpperCamelCase , )
# pass init params to Decoder
UpperCAmelCase_ : Dict = Decoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , up_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , norm_num_groups=_UpperCamelCase , act_fn=_UpperCamelCase , )
UpperCAmelCase_ : Any = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
UpperCAmelCase_ : List[Any] = nn.Convad(_UpperCamelCase , _UpperCamelCase , 1 )
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : int = False
# only relevant if vae tiling is enabled
UpperCAmelCase_ : Optional[int] = self.config.sample_size
UpperCAmelCase_ : int = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
UpperCAmelCase_ : Union[str, Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
UpperCAmelCase_ : Optional[Any] = 0.25
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False ) -> List[str]:
if isinstance(_UpperCamelCase , (Encoder, Decoder) ):
UpperCAmelCase_ : Union[str, Any] = value
def __UpperCAmelCase ( self , _UpperCamelCase = True ) -> int:
UpperCAmelCase_ : Tuple = use_tiling
def __UpperCAmelCase ( self ) -> Dict:
self.enable_tiling(_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : str = True
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __UpperCAmelCase ( self ) -> Dict[str, AttentionProcessor]:
UpperCAmelCase_ : Optional[int] = {}
def fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
UpperCAmelCase_ : Optional[int] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return processors
def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = len(self.attn_processors.keys() )
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(_UpperCamelCase )} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
module.set_processor(_UpperCamelCase )
else:
module.set_processor(processor.pop(f"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_UpperCamelCase , return_dict=_UpperCamelCase )
if self.use_slicing and x.shape[0] > 1:
UpperCAmelCase_ : Union[str, Any] = [self.encoder(_UpperCamelCase ) for x_slice in x.split(1 )]
UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : List[Any] = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self.quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_UpperCamelCase , return_dict=_UpperCamelCase )
UpperCAmelCase_ : str = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.decoder(_UpperCamelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
UpperCAmelCase_ : List[str] = [self._decode(_UpperCamelCase ).sample for z_slice in z.split(1 )]
UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : Any = self._decode(_UpperCamelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : Tuple = min(a.shape[2] , b.shape[2] , _UpperCamelCase )
for y in range(_UpperCamelCase ):
UpperCAmelCase_ : str = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Tuple = min(a.shape[3] , b.shape[3] , _UpperCamelCase )
for x in range(_UpperCamelCase ):
UpperCAmelCase_ : int = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
UpperCAmelCase_ : Any = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Tuple = int(self.tile_latent_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Optional[int] = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
UpperCAmelCase_ : List[str] = []
for i in range(0 , x.shape[2] , _UpperCamelCase ):
UpperCAmelCase_ : Any = []
for j in range(0 , x.shape[3] , _UpperCamelCase ):
UpperCAmelCase_ : Any = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
UpperCAmelCase_ : Dict = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.quant_conv(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
UpperCAmelCase_ : str = []
for i, row in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[Any] = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Dict = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase )
if j > 0:
UpperCAmelCase_ : List[str] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) )
UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=2 )
UpperCAmelCase_ : List[Any] = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : str = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Dict = int(self.tile_sample_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Dict = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
UpperCAmelCase_ : Union[str, Any] = []
for i in range(0 , z.shape[2] , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = []
for j in range(0 , z.shape[3] , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
UpperCAmelCase_ : Optional[Any] = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = self.decoder(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = []
for i, row in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[Any] = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Union[str, Any] = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase )
if j > 0:
UpperCAmelCase_ : Optional[Any] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) )
UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : Optional[Any] = sample
UpperCAmelCase_ : Union[str, Any] = self.encode(_UpperCamelCase ).latent_dist
if sample_posterior:
UpperCAmelCase_ : str = posterior.sample(generator=_UpperCamelCase )
else:
UpperCAmelCase_ : int = posterior.mode()
UpperCAmelCase_ : Dict = self.decode(_UpperCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
| 29 | 0 |
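# The seam-blending rule behind blend_v above, isolated: cross-fade tile `b`
# into tile `a` linearly over `blend_extent` rows so tiled encodes/decodes meet
# without visible seams. Same arithmetic as the method, on plain 4-D tensors.
import torch
def blend_rows(a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
    blend_extent = min(a.shape[2], b.shape[2], blend_extent)
    for y in range(blend_extent):
        b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
    return b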
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class __lowercase :
"""simple docstring"""
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=32 , A=2 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , A=10_00 , ) -> Any:
'''simple docstring'''
lowerCamelCase = parent
lowerCamelCase = batch_size
lowerCamelCase = seq_length
lowerCamelCase = is_training
lowerCamelCase = use_input_mask
lowerCamelCase = use_token_type_ids
lowerCamelCase = use_labels
lowerCamelCase = vocab_size
lowerCamelCase = hidden_size
lowerCamelCase = num_hidden_layers
lowerCamelCase = num_attention_heads
lowerCamelCase = intermediate_size
lowerCamelCase = hidden_act
lowerCamelCase = hidden_dropout_prob
lowerCamelCase = attention_probs_dropout_prob
lowerCamelCase = max_position_embeddings
lowerCamelCase = type_vocab_size
lowerCamelCase = type_sequence_label_size
lowerCamelCase = initializer_range
lowerCamelCase = num_labels
lowerCamelCase = num_choices
lowerCamelCase = scope
lowerCamelCase = range_bbox
def __A ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCamelCase = bbox[i, j, 3]
lowerCamelCase = bbox[i, j, 1]
lowerCamelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCamelCase = bbox[i, j, 2]
lowerCamelCase = bbox[i, j, 0]
lowerCamelCase = t
lowerCamelCase = tf.convert_to_tensor(_UpperCamelCase )
lowerCamelCase = None
if self.use_input_mask:
lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase = None
if self.use_token_type_ids:
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = None
if self.use_labels:
lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self , A , A , A , A , A , A , A , A ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = TFLayoutLMModel(config=_UpperCamelCase )
lowerCamelCase = model(_UpperCamelCase , _UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
lowerCamelCase = model(_UpperCamelCase , _UpperCamelCase , token_type_ids=_UpperCamelCase )
lowerCamelCase = model(_UpperCamelCase , _UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __A ( self , A , A , A , A , A , A , A , A ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = TFLayoutLMForMaskedLM(config=_UpperCamelCase )
lowerCamelCase = model(_UpperCamelCase , _UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , A , A , A , A ) -> int:
'''simple docstring'''
lowerCamelCase = self.num_labels
lowerCamelCase = TFLayoutLMForSequenceClassification(config=_UpperCamelCase )
lowerCamelCase = model(_UpperCamelCase , _UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self , A , A , A , A , A , A , A , A ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = self.num_labels
lowerCamelCase = TFLayoutLMForTokenClassification(config=_UpperCamelCase )
lowerCamelCase = model(_UpperCamelCase , _UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self , A , A , A , A , A , A , A , A ) -> Any:
'''simple docstring'''
lowerCamelCase = TFLayoutLMForQuestionAnswering(config=_UpperCamelCase )
lowerCamelCase = model(_UpperCamelCase , _UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self ) -> List[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
lowerCamelCase = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class __lowercase ( _snake_case , _snake_case , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : int = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
UpperCamelCase : List[Any] = (
{
'''feature-extraction''': TFLayoutLMModel,
'''fill-mask''': TFLayoutLMForMaskedLM,
'''text-classification''': TFLayoutLMForSequenceClassification,
'''token-classification''': TFLayoutLMForTokenClassification,
'''zero-shot''': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase : Optional[int] = False
UpperCamelCase : Dict = True
UpperCamelCase : Dict = 1_0
def __A ( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase = TFLayoutLMModelTester(self )
lowerCamelCase = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37 )
def __A ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def __A ( self ) -> str:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCamelCase )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCamelCase )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase )
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase = TFLayoutLMModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@unittest.skip("""Onnx compliancy broke with TF 2.10""" )
def __A ( self ) -> Tuple:
'''simple docstring'''
pass
def __lowerCamelCase ( ):
    '''simple docstring'''
    # fmt: off
lowerCamelCase = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231
lowerCamelCase = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
lowerCamelCase = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231
lowerCamelCase = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
lowerCamelCase = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase = TFLayoutLMModel.from_pretrained("""microsoft/layoutlm-base-uncased""" )
lowerCamelCase = prepare_layoutlm_batch_inputs()
# forward pass
lowerCamelCase = model(input_ids=_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
# test the sequence output on [0, :3, :3]
lowerCamelCase = tf.convert_to_tensor(
[[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _UpperCamelCase , atol=1e-3 ) )
# test the pooled output on [1, :3]
lowerCamelCase = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _UpperCamelCase , atol=1e-3 ) )
@slow
def __A ( self ) -> str:
'''simple docstring'''
lowerCamelCase = TFLayoutLMForSequenceClassification.from_pretrained("""microsoft/layoutlm-base-uncased""" , num_labels=2 )
lowerCamelCase = prepare_layoutlm_batch_inputs()
# forward pass
lowerCamelCase = model(
input_ids=_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
lowerCamelCase = outputs.loss
lowerCamelCase = (2,)
self.assertEqual(loss.shape , _UpperCamelCase )
# test the shape of the logits
lowerCamelCase = outputs.logits
lowerCamelCase = (2, 2)
self.assertEqual(logits.shape , _UpperCamelCase )
@slow
def __A ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = TFLayoutLMForTokenClassification.from_pretrained("""microsoft/layoutlm-base-uncased""" , num_labels=13 )
lowerCamelCase = prepare_layoutlm_batch_inputs()
# forward pass
lowerCamelCase = model(
input_ids=_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
# test the shape of the logits
lowerCamelCase = outputs.logits
lowerCamelCase = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , _UpperCamelCase )
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase = TFLayoutLMForQuestionAnswering.from_pretrained("""microsoft/layoutlm-base-uncased""" )
lowerCamelCase = prepare_layoutlm_batch_inputs()
# forward pass
lowerCamelCase = model(input_ids=_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
# test the shape of the logits
lowerCamelCase = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , _UpperCamelCase )
self.assertEqual(outputs.end_logits.shape , _UpperCamelCase )
| 252 |
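# The bbox "legalization" loop in the tester above, vectorized: make every
# [x0, y0, x1, y1] box satisfy x0 <= x1 and y0 <= y1 by sorting each
# coordinate pair instead of swapping element-wise in Python loops.
import numpy as np
def legalize_bboxes(bbox: np.ndarray) -> np.ndarray:
    xs = np.sort(bbox[..., [0, 2]], axis=-1)
    ys = np.sort(bbox[..., [1, 3]], axis=-1)
    return np.stack([xs[..., 0], ys[..., 0], xs[..., 1], ys[..., 1]], axis=-1)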
def binary_and( a : int , b : int ):
    '''simple docstring'''
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == '1' and char_b == '1' ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
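# Worked examples beyond the doctest hook below, cross-checked by hand:
# 25 -> '011001' and 32 -> '100000' share no set bits once zero-padded.
assert binary_and(25, 32) == '0b000000'
assert binary_and(5, 3) == '0b001'  # '101' AND '011' column-wise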
if __name__ == "__main__":
import doctest
doctest.testmod()
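# a quick usage sketch of the bitwise-AND-on-binary-strings helper above
# (hedged: upstream the two parameters are distinct, e.g. `a` and `b`, even
# though the obfuscated signature collapses them into one name):
# lowercase__(25, 32)  # -> '0b000000' (no shared set bits)
# lowercase__(37, 50)  # -> '0b100000'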
| 29 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__: Union[str, Any] = logging.get_logger(__name__)
A__: List[Any] = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class A__ ( _snake_case ):
__UpperCamelCase : Union[str, Any] = '''yolos'''
def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Tuple=7_6_8 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :Tuple=1_2 , SCREAMING_SNAKE_CASE :Optional[Any]=3_0_7_2 , SCREAMING_SNAKE_CASE :Optional[int]="gelu" , SCREAMING_SNAKE_CASE :Dict=0.0 , SCREAMING_SNAKE_CASE :Optional[Any]=0.0 , SCREAMING_SNAKE_CASE :int=0.02 , SCREAMING_SNAKE_CASE :List[str]=1e-12 , SCREAMING_SNAKE_CASE :Dict=[5_1_2, 8_6_4] , SCREAMING_SNAKE_CASE :str=1_6 , SCREAMING_SNAKE_CASE :List[str]=3 , SCREAMING_SNAKE_CASE :str=True , SCREAMING_SNAKE_CASE :Union[str, Any]=1_0_0 , SCREAMING_SNAKE_CASE :Optional[Any]=True , SCREAMING_SNAKE_CASE :Optional[Any]=False , SCREAMING_SNAKE_CASE :Optional[int]=1 , SCREAMING_SNAKE_CASE :Optional[int]=5 , SCREAMING_SNAKE_CASE :str=2 , SCREAMING_SNAKE_CASE :Optional[Any]=5 , SCREAMING_SNAKE_CASE :Union[str, Any]=2 , SCREAMING_SNAKE_CASE :str=0.1 , **SCREAMING_SNAKE_CASE :Tuple , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**_UpperCamelCase )
_a : List[str] =hidden_size
_a : Any =num_hidden_layers
_a : List[Any] =num_attention_heads
_a : Optional[Any] =intermediate_size
_a : Any =hidden_act
_a : int =hidden_dropout_prob
_a : List[Any] =attention_probs_dropout_prob
_a : Tuple =initializer_range
_a : Optional[Any] =layer_norm_eps
_a : Dict =image_size
_a : List[str] =patch_size
_a : Optional[Any] =num_channels
_a : Union[str, Any] =qkv_bias
_a : Optional[int] =num_detection_tokens
_a : Dict =use_mid_position_embeddings
_a : Union[str, Any] =auxiliary_loss
# Hungarian matcher
_a : List[str] =class_cost
_a : Dict =bbox_cost
_a : str =giou_cost
# Loss coefficients
_a : List[str] =bbox_loss_coefficient
_a : Union[str, Any] =giou_loss_coefficient
_a : int =eos_coefficient
class A__ ( _snake_case ):
__UpperCamelCase : List[Any] = version.parse("1.11" )
@property
def __UpperCAmelCase ( self :int ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCAmelCase ( self :List[Any] ) -> float:
'''simple docstring'''
return 1e-4
@property
def __UpperCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
return 1_2
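# a minimal usage sketch via the upstream class this snippet mirrors
# (transformers' YolosConfig; values echo the defaults in the signature above):
# from transformers import YolosConfig
# config = YolosConfig(num_detection_tokens=100, image_size=[512, 864])
# config.hidden_size  # -> 768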
| 276 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'vocab.txt'}
__UpperCAmelCase = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__UpperCAmelCase = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__UpperCAmelCase = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = VOCAB_FILES_NAMES
_snake_case : int = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Dict = PRETRAINED_INIT_CONFIGURATION
_snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Any = ConvBertTokenizer
def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase="[UNK]" , _UpperCamelCase="[SEP]" , _UpperCamelCase="[PAD]" , _UpperCamelCase="[CLS]" , _UpperCamelCase="[MASK]" , _UpperCamelCase=True , _UpperCamelCase=None , **_UpperCamelCase , ) -> Dict:
super().__init__(
_UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , )
UpperCAmelCase_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _UpperCamelCase ) != tokenize_chinese_chars
):
UpperCAmelCase_ : Any = getattr(_UpperCamelCase , normalizer_state.pop('type' ) )
UpperCAmelCase_ : str = do_lower_case
UpperCAmelCase_ : List[Any] = strip_accents
UpperCAmelCase_ : str = tokenize_chinese_chars
UpperCAmelCase_ : Tuple = normalizer_class(**_UpperCamelCase )
UpperCAmelCase_ : Any = do_lower_case
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=None ) -> List[str]:
UpperCAmelCase_ : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]:
UpperCAmelCase_ : Union[str, Any] = [self.sep_token_id]
UpperCAmelCase_ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]:
UpperCAmelCase_ : Any = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
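# a minimal usage sketch via the upstream class this snippet mirrors
# (transformers' ConvBertTokenizerFast; needs network access on first download):
# from transformers import ConvBertTokenizerFast
# tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
# tokenizer("convolutions meet attention").input_ids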
| 29 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
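# a quick sketch of what the lazy-module pattern above buys you: attribute
# access, not import, triggers loading of the heavy backends, e.g. at the
# top-level package:
# import transformers
# transformers.OPTConfig        # cheap -- configuration only
# transformers.OPTForCausalLM   # first access pulls in the torch modeling code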
| 104 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = '''efficientformer'''
def __init__( self , _UpperCamelCase = [3, 2, 6, 4] , _UpperCamelCase = [4_8, 9_6, 2_2_4, 4_4_8] , _UpperCamelCase = [True, True, True, True] , _UpperCamelCase = 4_4_8 , _UpperCamelCase = 3_2 , _UpperCamelCase = 4 , _UpperCamelCase = 7 , _UpperCamelCase = 5 , _UpperCamelCase = 8 , _UpperCamelCase = 4 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1_6 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 2 , _UpperCamelCase = 1 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1 , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1E-5 , _UpperCamelCase = "gelu" , _UpperCamelCase = 0.02 , _UpperCamelCase = 1E-12 , _UpperCamelCase = 2_2_4 , _UpperCamelCase = 1E-05 , **_UpperCamelCase , ) -> None:
super().__init__(**_UpperCamelCase )
UpperCAmelCase_ : int = hidden_act
UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase_ : Tuple = hidden_sizes
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : List[str] = num_attention_heads
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : List[str] = patch_size
UpperCAmelCase_ : Union[str, Any] = num_channels
UpperCAmelCase_ : Optional[Any] = depths
UpperCAmelCase_ : List[Any] = mlp_expansion_ratio
UpperCAmelCase_ : List[str] = downsamples
UpperCAmelCase_ : List[Any] = dim
UpperCAmelCase_ : Tuple = key_dim
UpperCAmelCase_ : Optional[int] = attention_ratio
UpperCAmelCase_ : str = resolution
UpperCAmelCase_ : Dict = pool_size
UpperCAmelCase_ : Union[str, Any] = downsample_patch_size
UpperCAmelCase_ : List[str] = downsample_stride
UpperCAmelCase_ : List[str] = downsample_pad
UpperCAmelCase_ : Any = drop_path_rate
UpperCAmelCase_ : Dict = num_meta3d_blocks
UpperCAmelCase_ : Dict = distillation
UpperCAmelCase_ : int = use_layer_scale
UpperCAmelCase_ : Any = layer_scale_init_value
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : Dict = batch_norm_eps
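# a minimal usage sketch via the upstream class this snippet mirrors
# (transformers' EfficientFormerConfig; values echo the defaults above):
# from transformers import EfficientFormerConfig
# config = EfficientFormerConfig(depths=[3, 2, 6, 4], hidden_sizes=[48, 96, 224, 448])
# config.num_attention_heads  # -> 8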
| 29 | 0 |
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
_SCREAMING_SNAKE_CASE : Any = logging.getLogger()
def UpperCamelCase_( snake_case : Path , snake_case : list ):
'''simple docstring'''
snake_case_ = '\n'.join(__snake_case )
Path(__snake_case ).open("w" ).writelines(__snake_case )
_SCREAMING_SNAKE_CASE : Optional[Any] = "patrickvonplaten/t5-tiny-random"
_SCREAMING_SNAKE_CASE : Any = "sshleifer/bart-tiny-random"
_SCREAMING_SNAKE_CASE : Optional[Any] = "sshleifer/tiny-mbart"
_SCREAMING_SNAKE_CASE : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class _snake_case ( _snake_case ):
def lowerCAmelCase__ ( self , a__ ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
snake_case_ = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
snake_case_ = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
_dump_articles(_UpperCamelCase , _UpperCamelCase )
snake_case_ = str(Path(self.get_auto_remove_tmp_dir() ) / "scores.json" )
snake_case_ = 'translation_en_to_de' if model == T5_TINY else 'summarization'
snake_case_ = F'\n run_eval.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split()
with patch.object(_UpperCamelCase , "argv" , _UpperCamelCase ):
run_generate()
assert Path(_UpperCamelCase ).exists()
# os.remove(Path(output_file_name))
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
self.run_eval_tester(_UpperCamelCase )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def lowerCAmelCase__ ( self , a__ ) -> Optional[int]:
'''simple docstring'''
self.run_eval_tester(_UpperCamelCase )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def lowerCAmelCase__ ( self , a__ ) -> Dict:
'''simple docstring'''
snake_case_ = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
snake_case_ = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
snake_case_ = {
'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
'de': [
'Maschinelles Lernen ist großartig, oder?',
'Ich esse gerne Bananen',
'Morgen ist wieder ein toller Tag!',
],
}
snake_case_ = Path(self.get_auto_remove_tmp_dir() )
snake_case_ = str(tmp_dir / "scores.json" )
snake_case_ = str(tmp_dir / "val.target" )
_dump_articles(_UpperCamelCase , text["en"] )
_dump_articles(_UpperCamelCase , text["de"] )
snake_case_ = 'translation_en_to_de' if model == T5_TINY else 'summarization'
snake_case_ = F'\n run_eval_search.py\n {model}\n {str(_UpperCamelCase )}\n {str(_UpperCamelCase )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split()
testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"] )
with patch.object(_UpperCamelCase , "argv" , _UpperCamelCase ):
with CaptureStdout() as cs:
run_search()
snake_case_ = [' num_beams | length_penalty', model, 'Best score args']
snake_case_ = ['Info']
if "translation" in task:
expected_strings.append("bleu" )
else:
expected_strings.extend(ROUGE_KEYS )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(_UpperCamelCase ).exists()
os.remove(Path(_UpperCamelCase ) )
| 85 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Union[PIL.Image.Image, np.ndarray]
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Any:
super().__init__()
self.register_modules(
prior=_UpperCamelCase , image_encoder=_UpperCamelCase , image_processor=_UpperCamelCase , scheduler=_UpperCamelCase , renderer=_UpperCamelCase , )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
if latents is None:
UpperCAmelCase_ : str = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
UpperCAmelCase_ : Tuple = latents.to(_UpperCamelCase )
UpperCAmelCase_ : Tuple = latents * scheduler.init_noise_sigma
return latents
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase_ : int = torch.device(f"cuda:{gpu_id}" )
UpperCAmelCase_ : int = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCamelCase , _UpperCamelCase )
@property
def __UpperCAmelCase ( self ) -> int:
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_UpperCamelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> str:
if isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , torch.Tensor ):
UpperCAmelCase_ : int = torch.cat(_UpperCamelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(_UpperCamelCase , axis=0 )
if not isinstance(_UpperCamelCase , torch.Tensor ):
UpperCAmelCase_ : Optional[int] = self.image_processor(_UpperCamelCase , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
UpperCAmelCase_ : Tuple = image.to(dtype=self.image_encoder.dtype , device=_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self.image_encoder(_UpperCamelCase )['last_hidden_state']
UpperCAmelCase_ : Union[str, Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
UpperCAmelCase_ : List[str] = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : Dict = torch.zeros_like(_UpperCamelCase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase_ : Optional[int] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase = 1 , _UpperCamelCase = 2_5 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 4.0 , _UpperCamelCase = 6_4 , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> Union[str, Any]:
if isinstance(_UpperCamelCase , PIL.Image.Image ):
UpperCAmelCase_ : Tuple = 1
elif isinstance(_UpperCamelCase , torch.Tensor ):
UpperCAmelCase_ : str = image.shape[0]
elif isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
UpperCAmelCase_ : Optional[int] = len(_UpperCamelCase )
else:
raise ValueError(
f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_UpperCamelCase )}" )
UpperCAmelCase_ : Tuple = self._execution_device
UpperCAmelCase_ : str = batch_size * num_images_per_prompt
UpperCAmelCase_ : str = guidance_scale > 1.0
UpperCAmelCase_ : str = self._encode_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# prior
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
UpperCAmelCase_ : int = self.scheduler.timesteps
UpperCAmelCase_ : int = self.prior.config.num_embeddings
UpperCAmelCase_ : Any = self.prior.config.embedding_dim
UpperCAmelCase_ : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
UpperCAmelCase_ : List[Any] = latents.reshape(latents.shape[0] , _UpperCamelCase , _UpperCamelCase )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : Optional[Any] = self.scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : int = self.prior(
_UpperCamelCase , timestep=_UpperCamelCase , proj_embedding=_UpperCamelCase , ).predicted_image_embedding
# remove the variance
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 )
UpperCAmelCase_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
UpperCAmelCase_ : List[str] = self.scheduler.step(
_UpperCamelCase , timestep=_UpperCamelCase , sample=_UpperCamelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = []
for i, latent in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[str] = self.renderer.decode(
latent[None, :] , _UpperCamelCase , size=_UpperCamelCase , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , )
images.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = torch.stack(_UpperCamelCase )
if output_type not in ["np", "pil"]:
raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}" )
UpperCAmelCase_ : Dict = images.cpu().numpy()
if output_type == "pil":
UpperCAmelCase_ : List[str] = [self.numpy_to_pil(_UpperCamelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_UpperCamelCase )
| 29 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( a_: int, a_: int ):
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
_UpperCAmelCase : Tuple = str(bin(__snake_case ) )[2:] # remove the leading "0b"
_UpperCAmelCase : Union[str, Any] = str(bin(__snake_case ) )[2:] # remove the leading "0b"
_UpperCAmelCase : List[Any] = max(len(__snake_case ), len(__snake_case ) )
return "0b" + "".join(
str(int(char_a == "1" and char_b == "1" ) )
for char_a, char_b in zip(a_binary.zfill(__snake_case ), b_binary.zfill(__snake_case ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 145 |
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase (_snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : Union[str, Any] = IFImg2ImgSuperResolutionPipeline
_snake_case : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
_snake_case : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
_snake_case : List[str] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __UpperCAmelCase ( self ) -> Optional[Any]:
return self._get_superresolution_dummy_components()
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=0 ) -> Any:
if str(_UpperCamelCase ).startswith('mps' ):
UpperCAmelCase_ : List[Any] = torch.manual_seed(_UpperCamelCase )
else:
UpperCAmelCase_ : int = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
UpperCAmelCase_ : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
UpperCAmelCase_ : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCAmelCase ( self ) -> Any:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __UpperCAmelCase ( self ) -> Dict:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __UpperCAmelCase ( self ) -> str:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_float16(expected_max_diff=1E-1 )
def __UpperCAmelCase ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self._test_save_load_local()
def __UpperCAmelCase ( self ) -> Dict:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 29 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case = {
"""configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""VisionEncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""TFVisionEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""FlaxVisionEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
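# a minimal usage sketch (documented upstream API; downloads two checkpoints):
# from transformers import VisionEncoderDecoderModel
# model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(
#     "google/vit-base-patch16-224-in21k", "bert-base-uncased"
# )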
| 203 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29 | 0 |
from __future__ import annotations
__A = list[list[int]]
# assigning initial values to the grid
__A = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__A = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def lowerCamelCase_ ( UpperCamelCase__ : Matrix , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> Optional[int]:
"""simple docstring"""
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def lowerCamelCase_ ( UpperCamelCase__ : Matrix ) -> Union[str, Any]:
"""simple docstring"""
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def lowerCamelCase_ ( UpperCamelCase__ : Matrix ) -> Optional[int]:
"""simple docstring"""
if location := find_empty_location(__snake_case ):
__lowerCamelCase = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(__snake_case , __snake_case , __snake_case , __snake_case ):
__lowerCamelCase = digit
if sudoku(__snake_case ) is not None:
return grid
__lowerCamelCase = 0
return None
def lowerCamelCase_ ( UpperCamelCase__ : Matrix ) -> List[str]:
"""simple docstring"""
for row in grid:
for cell in row:
print(__snake_case , end=' ' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
__A = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 90 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'tensor(bool)': np.bool_,
'tensor(int8)': np.int8,
'tensor(uint8)': np.uint8,
'tensor(int16)': np.int16,
'tensor(uint16)': np.uint16,
'tensor(int32)': np.int32,
'tensor(uint32)': np.uint32,
'tensor(int64)': np.int64,
'tensor(uint64)': np.uint64,
'tensor(float16)': np.float16,
'tensor(float)': np.float32,
'tensor(double)': np.float64,
}
class lowerCamelCase :
'''simple docstring'''
def __init__( self , _UpperCamelCase=None , **_UpperCamelCase ) -> Dict:
logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
UpperCAmelCase_ : Any = model
UpperCAmelCase_ : int = kwargs.get('model_save_dir' , _UpperCamelCase )
UpperCAmelCase_ : List[Any] = kwargs.get('latest_model_name' , _UpperCamelCase )
def __call__( self , **_UpperCamelCase ) -> str:
UpperCAmelCase_ : Optional[int] = {k: np.array(v ) for k, v in kwargs.items()}
return self.model.run(_UpperCamelCase , _UpperCamelCase )
@staticmethod
def __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None ) -> List[Any]:
if provider is None:
logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
UpperCAmelCase_ : List[str] = 'CPUExecutionProvider'
return ort.InferenceSession(_UpperCamelCase , providers=[provider] , sess_options=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Any = file_name if file_name is not None else ONNX_WEIGHTS_NAME
UpperCAmelCase_ : Optional[Any] = self.model_save_dir.joinpath(self.latest_model_name )
UpperCAmelCase_ : str = Path(_UpperCamelCase ).joinpath(_UpperCamelCase )
try:
shutil.copyfile(_UpperCamelCase , _UpperCamelCase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
UpperCAmelCase_ : Optional[Any] = self.model_save_dir.joinpath(_UpperCamelCase )
if src_path.exists():
UpperCAmelCase_ : List[Any] = Path(_UpperCamelCase ).joinpath(_UpperCamelCase )
try:
shutil.copyfile(_UpperCamelCase , _UpperCamelCase )
except shutil.SameFileError:
pass
def __UpperCAmelCase ( self , _UpperCamelCase , **_UpperCamelCase , ) -> List[str]:
if os.path.isfile(_UpperCamelCase ):
logger.error(f"Provided path ({save_directory}) should be a directory, not a file" )
return
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
# saving model weights/files
self._save_pretrained(_UpperCamelCase , **_UpperCamelCase )
@classmethod
def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> List[str]:
UpperCAmelCase_ : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(_UpperCamelCase ):
UpperCAmelCase_ : Union[str, Any] = OnnxRuntimeModel.load_model(
os.path.join(_UpperCamelCase , _UpperCamelCase ) , provider=_UpperCamelCase , sess_options=_UpperCamelCase )
UpperCAmelCase_ : Tuple = Path(_UpperCamelCase )
# load model from hub
else:
# download model
UpperCAmelCase_ : List[str] = hf_hub_download(
repo_id=_UpperCamelCase , filename=_UpperCamelCase , use_auth_token=_UpperCamelCase , revision=_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , )
UpperCAmelCase_ : Union[str, Any] = Path(_UpperCamelCase ).parent
UpperCAmelCase_ : List[str] = Path(_UpperCamelCase ).name
UpperCAmelCase_ : Union[str, Any] = OnnxRuntimeModel.load_model(_UpperCamelCase , provider=_UpperCamelCase , sess_options=_UpperCamelCase )
return cls(model=_UpperCamelCase , **_UpperCamelCase )
@classmethod
def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> Optional[int]:
UpperCAmelCase_ : List[str] = None
if len(str(_UpperCamelCase ).split('@' ) ) == 2:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = model_id.split('@' )
return cls._from_pretrained(
model_id=_UpperCamelCase , revision=_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , use_auth_token=_UpperCamelCase , **_UpperCamelCase , )
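# a minimal usage sketch via the upstream class this wrapper mirrors
# (diffusers' OnnxRuntimeModel; assumes an exported ONNX model on disk):
# from diffusers import OnnxRuntimeModel
# unet = OnnxRuntimeModel.from_pretrained("./onnx-sd/unet", provider="CPUExecutionProvider")
# unet(sample=..., timestep=..., encoder_hidden_states=...)  # kwargs become ONNX inputs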
| 29 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class SCREAMING_SNAKE_CASE__ ( _snake_case ):
_a = '''speech_to_text_2'''
_a = ['''past_key_values''']
_a = {'''num_attention_heads''': '''decoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Any , lowerCAmelCase : Any=1_0000 , lowerCAmelCase : List[Any]=6 , lowerCAmelCase : Union[str, Any]=2048 , lowerCAmelCase : int=4 , lowerCAmelCase : Dict=0.0 , lowerCAmelCase : str=True , lowerCAmelCase : Optional[Any]="relu" , lowerCAmelCase : Any=256 , lowerCAmelCase : Optional[Any]=0.1 , lowerCAmelCase : Tuple=0.0 , lowerCAmelCase : List[str]=0.0 , lowerCAmelCase : str=0.02 , lowerCAmelCase : Union[str, Any]=2 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Any=1 , lowerCAmelCase : List[str]=0 , lowerCAmelCase : str=2 , lowerCAmelCase : Optional[int]=1024 , **lowerCAmelCase : int , ):
lowerCAmelCase = vocab_size
lowerCAmelCase = d_model
lowerCAmelCase = decoder_ffn_dim
lowerCAmelCase = decoder_layers
lowerCAmelCase = decoder_attention_heads
lowerCAmelCase = dropout
lowerCAmelCase = attention_dropout
lowerCAmelCase = activation_dropout
lowerCAmelCase = activation_function
lowerCAmelCase = init_std
lowerCAmelCase = decoder_layerdrop
lowerCAmelCase = use_cache
lowerCAmelCase = decoder_layers
lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCAmelCase = max_target_positions
super().__init__(
pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , decoder_start_token_id=_UpperCamelCase , **_UpperCamelCase , )
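# a minimal usage sketch via the upstream class this snippet mirrors
# (transformers' Speech2Text2Config; values echo the defaults above):
# from transformers import Speech2Text2Config
# config = Speech2Text2Config(vocab_size=10000, d_model=256)
# config.decoder_layers  # -> 6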
| 155 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = 10
UpperCAmelCase_ : Tuple = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
UpperCAmelCase_ : Tuple = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(__snake_case ) ),
} , features=__snake_case , )
return dataset
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : str = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=__snake_case )
return filename
# FILE_CONTENT + files
__UpperCAmelCase = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt'
UpperCAmelCase_ : Tuple = FILE_CONTENT
with open(__snake_case , 'w' ) as f:
f.write(__snake_case )
return filename
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
import bz2
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
UpperCAmelCase_ : str = bytes(__snake_case , 'utf-8' )
with bz2.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
UpperCAmelCase_ : Dict = bytes(__snake_case , 'utf-8' )
with gzip.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
import lz4.frame
UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' )
with lz4.frame.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : List[Any] ):
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
import py7zr
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with py7zr.SevenZipFile(__snake_case , 'w' ) as archive:
archive.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] , __snake_case : List[Any] ):
'''simple docstring'''
import tarfile
UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
import lzma
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' )
with lzma.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[int] , __snake_case : Optional[Any] ):
'''simple docstring'''
import zipfile
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
UpperCAmelCase_ : Tuple = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
UpperCAmelCase_ : List[str] = bytes(__snake_case , 'utf-8' )
with zstd.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.xml'
UpperCAmelCase_ : List[Any] = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(__snake_case , 'w' ) as f:
f.write(__snake_case )
return filename
__UpperCAmelCase = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
__UpperCAmelCase = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
__UpperCAmelCase = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
__UpperCAmelCase = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
__UpperCAmelCase = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = datasets.Dataset.from_dict(__snake_case )
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlite3.connect(__snake_case ) ) as con:
UpperCAmelCase_ : List[Any] = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(__snake_case , 'w' , newline='' ) as f:
UpperCAmelCase_ : Tuple = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(__snake_case , 'w' , newline='' ) as f:
UpperCAmelCase_ : Optional[Any] = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any ):
'''simple docstring'''
import bz2
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(__snake_case , 'rb' ) as f:
UpperCAmelCase_ : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bz2.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] , __snake_case : Tuple , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(__snake_case , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : int , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
UpperCAmelCase_ : Dict = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.int64(),
'col_3': pa.float64(),
} )
with open(__snake_case , 'wb' ) as f:
UpperCAmelCase_ : List[Any] = pq.ParquetWriter(__snake_case , schema=__snake_case )
UpperCAmelCase_ : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__snake_case ) )] for k in DATA[0]} , schema=__snake_case )
writer.write_table(__snake_case )
writer.close()
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase_ : Optional[int] = {'data': DATA}
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase_ : Tuple = {'data': DATA_DICT_OF_LISTS}
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Dict ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(__snake_case , 'rb' ) as orig_file:
with gzip.open(__snake_case , 'wb' ) as zipped_file:
zipped_file.writelines(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int , __snake_case : Any ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(__snake_case , 'rb' ) as orig_file:
with gzip.open(__snake_case , 'wb' ) as zipped_file:
zipped_file.writelines(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : str , __snake_case : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : str , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any , __snake_case : Any , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = ['0', '1', '2', '3']
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = ['0', '1', '2', '3']
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ['0', '1', '2', '3']
UpperCAmelCase_ : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : str , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename('unsupported.ext' ) )
f.write(__snake_case , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(__snake_case , 'w' , encoding='utf-8' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
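# a minimal sketch of consuming these session fixtures from a test module
# (hedged: assumes the fixtures keep their upstream names, e.g. `csv_path`,
# which the obfuscated defs above no longer show):
# def test_csv_fixture_contents(csv_path):
#     import csv
#     with open(csv_path, newline="") as f:
#         rows = list(csv.DictReader(f))
#     assert len(rows) == 4  # matches the DATA constant above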
| 29 | 0 |
"""simple docstring"""
_lowercase = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def decimal_to_hexadecimal(decimal):
    # Function name is a reconstruction; the original identifier was mangled.
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ''
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = '0x' + hexadecimal
    if negative:
        hexadecimal = '-' + hexadecimal
    return hexadecimal
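# Illustrative checks (note that an input of 0 skips the loop and yields the edge case '0x'):
# decimal_to_hexadecimal(255) == '0xff'
# decimal_to_hexadecimal(-42) == '-0x2a'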
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 74 |
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int):
    '''Find all positions a knight can reach from `position` on an n x n board.'''
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions
def is_complete(board: list[list[int]]):
    '''Check whether every cell of the board has been visited.'''
    return not any(elem == 0 for row in board for elem in row)
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int):
    '''Backtracking helper for the open knight's tour problem.'''
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False
def open_knight_tour(n: int):
    '''Find a solution for the open knight's tour problem on a board of size n.'''
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
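# Illustrative: open_knight_tour(5) returns a 5x5 board whose entries 1..25 trace
# one open knight's tour; unsolvable sizes such as n=4 raise ValueError.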
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 0 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
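# This script converts an OpenAI consistency-models checkpoint (a cd_*/ct_* .pt file)
# into a diffusers ConsistencyModelPipeline (UNet2DModel + CMStochasticIterativeScheduler).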
TEST_UNET_CONFIG = {
'sample_size': 32,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': 10_00,
'block_out_channels': [32, 64],
'attention_head_dim': 8,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
IMAGENET_64_UNET_CONFIG = {
'sample_size': 64,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 3,
'num_class_embeds': 10_00,
'block_out_channels': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
LSUN_256_UNET_CONFIG = {
'sample_size': 2_56,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': None,
'block_out_channels': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'default',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
CD_SCHEDULER_CONFIG = {
    'num_train_timesteps': 40,
    'sigma_min': 0.002,
    'sigma_max': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
    'num_train_timesteps': 2_01,
    'sigma_min': 0.002,
    'sigma_max': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
    'num_train_timesteps': 1_51,
    'sigma_min': 0.002,
    'sigma_max': 80.0,
}
def strabool(v):
    """Parse a truthy/falsy string into a bool (argparse-friendly)."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('boolean value expected')
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    # The diffusers-side key names on the left were lost in the dump; they are
    # restored here following the official conversion script's layout.
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
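# convert_attention (below) splits the original checkpoint's fused qkv projection into
# diffusers' separate to_q/to_k/to_v tensors and renames the norm and proj_out keys.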
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)
    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]
    # The original 1x1 convolution weights carry trailing singleton dims; squeeze
    # them so the tensors match diffusers' linear projections.
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    # Left-hand key names are reconstructed following the official script's layout.
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint['time_embed.0.weight']
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint['time_embed.0.bias']
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint['time_embed.2.weight']
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint['time_embed.2.bias']
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint['label_emb.weight']
    new_checkpoint["conv_in.weight"] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint["conv_in.bias"] = checkpoint['input_blocks.0.0.bias']
    down_block_types = unet_config['down_block_types']
    layers_per_block = unet_config['layers_per_block']
    attention_head_dim = unet_config['attention_head_dim']
    channels_list = unet_config['block_out_channels']
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = 'mid_block.resnets.0'
    old_prefix = 'middle_block.0'
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = 'mid_block.attentions.0'
    old_prefix = 'middle_block.1'
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = 'mid_block.resnets.1'
    old_prefix = 'middle_block.2'
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    current_layer = 0
    up_block_types = unet_config['up_block_types']
    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_checkpoint["conv_norm_out.weight"] = checkpoint['out.0.weight']
    new_checkpoint["conv_norm_out.bias"] = checkpoint['out.0.bias']
    new_checkpoint["conv_out.weight"] = checkpoint['out.2.weight']
    new_checkpoint["conv_out.bias"] = checkpoint['out.2.bias']
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.')
    parser.add_argument(
        '--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.'
    )
    parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.')
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
    print(f'Checkpoint: {ckpt_name}')
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
    if not args.class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
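# Example invocation (file names are illustrative):
#   python convert_consistency_to_diffusers.py --unet_path cd_imagenet64_l2.pt \
#       --dump_path ./consistency-model --class_cond True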
| 142 |
def partition(m: int) -> int:
    '''Count the integer partitions of m via the classic dynamic-programming recurrence.'''
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
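# Illustrative: partition(5) == 7, matching the seven partitions of 5
# (5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1).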
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 29 | 0 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    # Class, method, and parameter names in this test file were mangled in the dump;
    # they are restored here to match the names referenced elsewhere in the file.
    def __init__(self, parent, batch_size=2, image_size=32, patch_size=16, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, backbone_out_indices=[0, 1, 2, 3], num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, backbone_featmap_shape=[1, 384, 24, 24], is_hybrid=True, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
            'global_padding': 'same',
            'layer_type': 'bottleneck',
            'depths': [3, 4, 9],
            'out_features': ['stage1', 'stage2', 'stage3'],
            'embedding_dynamic_padding': True,
            'hidden_sizes': [96, 192, 384, 768],
            'num_groups': 2,
        }
        return DPTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=backbone_config, backbone_featmap_shape=self.backbone_featmap_shape, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
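# The tester above builds a deliberately tiny hybrid-DPT config so that the shared
# ModelTesterMixin checks below run in seconds rather than minutes.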
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'depth-estimation': DPTForDepthEstimation,
            'feature-extraction': DPTModel,
            'image-segmentation': DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='DPT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):  # the original test name was lost in mangling; this one is assumed
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = 'add'
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    """Load the COCO fixture image used by the integration test."""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas')
        model = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas').to(torch_device)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
| 5 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    # Class and method names restored to match transformers' visual-question-answering pipeline.
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)
    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params['padding'] = padding
        if truncation is not None:
            preprocess_params['truncation'] = truncation
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {'image': image, 'question': question}
        else:
            # Batch case: `image` is already a list of {"image": ..., "question": ...} dicts.
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs['image'])
        model_inputs = self.tokenizer(
            inputs['question'], return_tensors=self.framework, padding=padding, truncation=truncation)
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 29 | 0 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
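# The two assertions check BERT's special-token layout: [CLS] A [SEP] for a single
# sequence and [CLS] A [SEP] B [SEP] for a pair.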
| 252 |
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
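# This solves Project Euler problem 42: a "triangle word" is one whose summed
# letter values (A=1 ... Z=26) equal a triangular number t_n = n(n+1)/2.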
def solution():
    '''Count how many words in words.txt are triangle words.'''
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, 'words.txt')
    words = ''
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip('\r\n').split(',')]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
| 29 | 0 |
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version('''nltk'''))
if NLTK_VERSION >= version.Version('''3.6.4'''):
from nltk import word_tokenize
_CITATION = '''\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'''
_DESCRIPTION = '''\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'''
_KWARGS_DESCRIPTION = '''\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"""] , reference_urls=[
"""https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score""",
"""https://en.wikipedia.org/wiki/METEOR""",
] , )
    def _download_and_prepare(self, dl_manager):
import nltk
nltk.download("""wordnet""" )
if NLTK_VERSION >= version.Version("""3.6.5""" ):
nltk.download("""punkt""" )
if NLTK_VERSION >= version.Version("""3.6.6""" ):
nltk.download("""omw-1.4""" )
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("""3.6.5""" ):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        return {"meteor": np.mean(scores)}
| 276 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec('s3fs') is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str):
    '''Strip the protocol prefix (e.g. "s3://") from a dataset path, if present.'''
    if "://" in dataset_path:
        dataset_path = dataset_path.split('://')[1]
    return dataset_path
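# Illustrative: extract_path_from_uri('s3://my-bucket/datasets/train') -> 'my-bucket/datasets/train'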
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
if fs is not None and fs.protocol != "file":
return True
else:
return False
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    # Function name is assumed; it moves src to dst on the given filesystem.
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
    '''Clear fsspec's cached event loop, IO thread, and lock (needed after forking).'''
    if hasattr(fsspec.asyn, 'reset_lock'):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
| 29 | 0 |
'''Cocktail shaker sort: a bidirectional variant of bubble sort.'''
def cocktail_shaker_sort(unsorted):
    """Sort a list in place by sweeping alternately backwards and forwards."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
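# Illustrative: cocktail_shaker_sort([4, 5, 2, 1, 2]) -> [1, 2, 2, 4, 5]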
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(f'{cocktail_shaker_sort(unsorted) = }')
| 104 |
def cocktail_shaker_sort(unsorted: list):
    '''Sort a list in place by sweeping alternately backwards and forwards.'''
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
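# Like bubble sort this is O(n^2) in the worst case, but the `swapped` early exit
# makes already-sorted input O(n).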
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(F'{cocktail_shaker_sort(unsorted) = }')
| 29 | 0 |
'''TrajectoryTransformer model configuration.'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = 'trajectory_transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__(self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249, action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128, embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0_0_0_6, max_position_embeddings=512, initializer_range=0.0_2, layer_norm_eps=1e-12, kaiming_initializer_range=1, use_cache=True, pad_token_id=1, bos_token_id=50_256, eos_token_id=50_256, **kwargs, ):
        # Parameter names restored to match the published TrajectoryTransformerConfig.
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
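# Illustrative: TrajectoryTransformerConfig() reproduces the defaults above, while
# TrajectoryTransformerConfig(n_layer=8, n_head=8) overrides the transformer depth/width.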
| 85 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    '''Round height/width up to the nearest multiple supported by the VQ model.'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
def prepare_image(pil_image, w=512, h=512):
    '''Resize a PIL image and convert it to a [-1, 1] float tensor of shape (1, 3, h, w).'''
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert('RGB'))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    '''Image-to-image pipeline for Kandinsky 2.2 (class name taken from the example docstring above).'''
    def __init__(self, unet, scheduler, movq, ):
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
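    # Illustrative: with num_inference_steps=100 and strength=0.3, get_timesteps keeps
    # only the final 30 scheduler steps, so the result stays close to the init image.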
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}")
        image = image.to(device=device, dtype=dtype)
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators.")
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents], dim=0)
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to('cpu', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, '_hf_hook'):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds, image, negative_image_embeds, height=512, width=512, num_inference_steps=100, guidance_scale=4.0, strength=0.3, num_images_per_prompt=1, generator=None, output_type="pil", return_dict=True, ):
        # Identifier names below are restored to match diffusers' published pipeline.
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor")
        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)
        latents = self.movq.encode(image)['latents']
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'image_embeds': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, 'variance_type')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 29 | 0 |
'''Forward (explicit) Euler method for solving ordinary differential equations.'''
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable, y0: float, x0: float, x_end: float, step_size: float):
    # Function name is assumed; it solves y' = ode_func(x, y), y(x0) = y0, on [x0, x_end].
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
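# Illustrative: approximating y' = y with y(0) = 1 on [0, 1],
# explicit_euler(lambda x, y: y, 1.0, 0.0, 1.0, 0.01)[-1] is about 2.7048,
# approaching the true value e ~= 2.71828 as step_size shrinks.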
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 145 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    '''Read a boolean flag from the environment, falling back to `default` when unset.'''
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
# The decorator names below follow accelerate's test utilities; the originals were mangled.
def skip(test_case):
    return unittest.skip('Test was skipped')(test_case)
def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, 'test is slow')(test_case)
def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), 'test requires only a CPU')(test_case)
def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), 'test requires a GPU')(test_case)
def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), 'test requires a XPU')(test_case)
def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), 'test requires a `mps` backend support in `torch`')(test_case)
def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), 'test requires the Hugging Face suite')(test_case)
def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), 'test requires the bitsandbytes library')(test_case)
def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), 'test requires TPU')(test_case)
def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, 'test requires a GPU')(test_case)
def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, 'test requires a XPU')(test_case)
def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, 'test requires multiple GPUs')(test_case)
def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, 'test requires multiple XPUs')(test_case)
def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), 'test requires safetensors')(test_case)
def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), 'test requires DeepSpeed')(test_case)
def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version('>=', '1.12.0'), 'test requires torch version >= 1.12.0')(test_case)
def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version('>=', version), f"test requires torch version >= {version}")(test_case)
def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), 'test requires Tensorboard')(test_case)
def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), 'test requires wandb')(test_case)
def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), 'test requires comet_ml')(test_case)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available, 'test requires at least one tracker to be available and for `comet_ml` to not be installed', )(test_case)
class TempDirTestCase(unittest.TestCase):
    # A TestCase whose class-level temporary directory is emptied (or recreated) between tests.
    clear_on_setup = True
    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()
    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)
    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob('**/*'):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    '''Gather `tensor` across processes and check that every copy is identical.'''
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    '''simple docstring'''
    def __init__(self, returncode, stdout, stderr) -> None:
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    '''simple docstring'''
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    '''simple docstring'''
    if echo:
        print('\nRunning: ', ' '.join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label='stdout:'))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label='stderr:'))),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    '''simple docstring'''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            F"The combined stderr from workers follows:\n{stderr}")
    return result
class SubprocessCallException(Exception):
    '''simple docstring'''
    pass
def run_command(command, return_stdout=False):
    '''simple docstring'''
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, 'decode'):
                output = output.decode('utf-8')
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}") from e
| 29 | 0 |
"""simple docstring"""
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
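
# A hedged alternative sketch (not from the original source): every third
# Fibonacci number is even, so the even terms satisfy E(k) = 4*E(k-1) + E(k-2).
# This avoids storing the whole sequence; `solution_closed_form` is a
# hypothetical name introduced here for illustration.
def solution_closed_form(n: int = 4_000_000) -> int:
    a, b = 2, 8  # the first two even Fibonacci numbers
    total = 0
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total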
if __name__ == "__main__":
print(F'''{solution() = }''')
| 203 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    '''simple docstring'''
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    '''simple docstring'''
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    '''simple docstring'''
    def __init__(self) -> None:
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
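
# Quick sanity check (illustrative, not part of the original test script): the
# model is a 1-D affine map, so a forward pass is just a * x + b.
# m = DummyModel(); y = m(torch.ones(4, 1))   # y has shape (4, 1)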
class CheckpointTest(unittest.TestCase):
    '''simple docstring'''
    def test_with_save_limit(self) -> None:
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1E-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)

    def test_can_resume_training_with_folder(self) -> None:
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1E-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            # Save initial
            initial = os.path.join(tmpdir, 'initial')
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1E-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, 'checkpoint')
            accelerator.save_state(checkpoint)
            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_can_resume_training(self) -> None:
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1E-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1E-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            accelerator.load_state(os.path.join(tmpdir, 'checkpoints', 'checkpoint_0'))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, 'checkpoints', 'checkpoint_1'))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_invalid_registration(self) -> None:
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue('Item at index 0' in message)
        self.assertTrue('Item at index 1' in message)
        self.assertFalse('Item at index 2' in message)
        self.assertFalse('Item at index 3' in message)

    def test_with_scheduler(self) -> None:
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1E-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler)
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, 'checkpoints', 'checkpoint_0'))
            self.assertEqual(scheduler_state, scheduler.state_dict())

    def test_checkpoint_deletion(self) -> None:
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 11 states; only the two most recent checkpoints should survive
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, 'checkpoints', 'checkpoint_0')))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, 'checkpoints', 'checkpoint_9')))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, 'checkpoints', 'checkpoint_10')))

    @require_cuda
    def test_map_location(self) -> None:
        cmd = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
__UpperCAmelCase = '/tmp/accelerate/state_checkpointing'
__UpperCAmelCase = DummyModel()
__UpperCAmelCase = torch.optim.Adam(params=model.parameters(), lr=1E-3)
__UpperCAmelCase = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
__UpperCAmelCase , __UpperCAmelCase = dummy_dataloaders()
__UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__UpperCAmelCase = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
__UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert param_device.type == accelerator.device.type
__UpperCAmelCase = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 29 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    """simple docstring"""
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (('eta', 0.0), ('num_inference_steps', 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1_000,
            'beta_start': 0.00_01,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'clip_sample': True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1], [0.0_02, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.1_47_71)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.3_24_60)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.0_09_79)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1_147.7_904) < 1e-2
        assert abs(result_mean.item() - 0.49_82) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 172.0_067) < 1e-2
        assert abs(result_mean.item() - 0.22_39_67) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type='v_prediction')
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 52.53_02) < 1e-2
        assert abs(result_mean.item() - 0.06_84) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.8_295) < 1e-2
        assert abs(result_mean.item() - 0.19_51) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.0_784) < 1e-2
        assert abs(result_mean.item() - 0.19_41) < 1e-3
| 90 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    '''simple docstring'''
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ImageGPTImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 29 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
        'YituTech/conv-bert-medium-small': (
            'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
        ),
        'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'YituTech/conv-bert-base': 512,
    'YituTech/conv-bert-medium-small': 512,
    'YituTech/conv-bert-small': 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    'YituTech/conv-bert-base': {'do_lower_case': True},
    'YituTech/conv-bert-medium-small': {'do_lower_case': True},
    'YituTech/conv-bert-small': {'do_lower_case': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 155 |
def is_palindrome(head):
    '''simple docstring'''
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    '''simple docstring'''
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    fast = slow = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    cur = head
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head):
    '''simple docstring'''
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
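
# A minimal harness (not part of the original module; `ListNode` is a
# hypothetical node class assumed here) to exercise the non-destructive checks:
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def build_list(values):
    head = None
    for val in reversed(values):
        node = ListNode(val)
        node.next = head
        head = node
    return head


if __name__ == "__main__":
    for values in ([1, 2, 2, 1], [1, 2, 3]):
        head = build_list(values)
        print(values, is_palindrome_stack(head), is_palindrome_dict(head))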
| 29 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''configuration_beit''': ['''BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BeitConfig''', '''BeitOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_beit'''] = ['''BeitFeatureExtractor''']
    _import_structure['''image_processing_beit'''] = ['''BeitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_beit'''] = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_beit'''] = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 74 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29 | 0 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt") -> Union[str, Any]:
    """simple docstring"""
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(''' ''') else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding='''max_length''' if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw, )
def trim_batch(input_ids, pad_token_id, attention_mask=None, ) -> List[Any]:
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
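
# Illustration (not from the original file): with pad_token_id=0,
# trim_batch(torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]]), 0)
# drops the all-pad columns and returns tensor([[5, 6], [7, 0]]).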
class Seq2SeqDataset(Dataset):
    def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix="", ) ->Any:
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + '''.source''')
        self.tgt_file = Path(data_dir).joinpath(type_path + '''.target''')
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, F"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self) ->int:
        return len(self.src_lens)

    def __getitem__(self, index) ->Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip('''\n''')
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip('''\n''')
        assert source_line, F"empty source line for index {index}"
        assert tgt_line, F"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, '''right''')
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, '''right''')
        source_ids = source_inputs['input_ids'].squeeze()
        target_ids = target_inputs['input_ids'].squeeze()
        src_mask = source_inputs['attention_mask'].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file) ->List[int]:
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) ->Dict[str, torch.Tensor]:
        input_ids = torch.stack([x['''input_ids'''] for x in batch])
        masks = torch.stack([x['''attention_mask'''] for x in batch])
        target_ids = torch.stack([x['''decoder_input_ids'''] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            'input_ids': source_ids,
            'attention_mask': source_mask,
            'decoder_input_ids': y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]) -> List[str]:
    """simple docstring"""
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path) -> Dict:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, '''git_log.json'''))


def save_json(content, path, indent=4, **json_dump_kwargs) -> str:
    """simple docstring"""
    with open(path, '''w''') as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path) -> str:
    """simple docstring"""
    with open(path) as f:
        return json.load(f)


def get_git_info() -> Optional[Any]:
    """simple docstring"""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch),
        'hostname': str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List[str]:
    """simple docstring"""
    return list(map(f, x))


def pickle_save(obj, path) -> List[Any]:
    """simple docstring"""
    with open(path, '''wb''') as f:
        return pickle.dump(obj, f)


def normalize_answer(s) -> Dict:
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text):
        return re.sub(R'''\b(a|an|the)\b''', ''' ''', text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth) -> List[str]:
    """simple docstring"""
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa


def exact_match_score(prediction, ground_truth) -> Optional[int]:
    """simple docstring"""
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns, reference_lns) -> List[str]:
    """simple docstring"""
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix) -> str:
    """simple docstring"""
    return model_prefix.startswith('''rag''')


def set_extra_model_params(extra_params, hparams, config) -> Any:
    """simple docstring"""
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['dropout'] = 'dropout_rate'
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info('''config doesn\'t have a `{}` attribute'''.format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
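
# Worked examples (illustrative, not from the original file) for the QA metrics:
# normalize_answer("The  Quick, Brown Fox!")            -> "quick brown fox"
# f1_score("green fox", "quick brown fox")              -> 0.4  (precision 1/2, recall 1/3)
# calculate_exact_match(["quick fox"], ["Quick Fox!"])  -> {"em": 1.0}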
| 142 |
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 29 | 0 |
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float, ) -> tuple[str, float]:
"""simple docstring"""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
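
# Worked example (values are illustrative, not from the original docstring):
# by the mass-action law n * p = n_i**2, an intrinsic concentration of 1.5e10
# and an electron concentration of 2.5e11 give hole_conc = (1.5e10)**2 / 2.5e11:
# carrier_concentration(electron_conc=2.5e11, hole_conc=0, intrinsic_conc=1.5e10)
# -> ('hole_conc', 900000000.0)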
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : "DiagonalGaussianDistribution"
class lowerCamelCase (_snake_case , _snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = True
@register_to_config
def __init__( self , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = ("DownEncoderBlock2D",) , _UpperCamelCase = ("UpDecoderBlock2D",) , _UpperCamelCase = (6_4,) , _UpperCamelCase = 1 , _UpperCamelCase = "silu" , _UpperCamelCase = 4 , _UpperCamelCase = 3_2 , _UpperCamelCase = 3_2 , _UpperCamelCase = 0.1_82_15 , ) -> List[Any]:
super().__init__()
# pass init params to Encoder
UpperCAmelCase_ : List[str] = Encoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , down_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , act_fn=_UpperCamelCase , norm_num_groups=_UpperCamelCase , double_z=_UpperCamelCase , )
# pass init params to Decoder
UpperCAmelCase_ : Dict = Decoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , up_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , norm_num_groups=_UpperCamelCase , act_fn=_UpperCamelCase , )
UpperCAmelCase_ : Any = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
UpperCAmelCase_ : List[Any] = nn.Convad(_UpperCamelCase , _UpperCamelCase , 1 )
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : int = False
# only relevant if vae tiling is enabled
UpperCAmelCase_ : Optional[int] = self.config.sample_size
UpperCAmelCase_ : int = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
UpperCAmelCase_ : Union[str, Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
UpperCAmelCase_ : Optional[Any] = 0.25
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False ) -> List[str]:
if isinstance(_UpperCamelCase , (Encoder, Decoder) ):
UpperCAmelCase_ : Union[str, Any] = value
def __UpperCAmelCase ( self , _UpperCamelCase = True ) -> int:
UpperCAmelCase_ : Tuple = use_tiling
def __UpperCAmelCase ( self ) -> Dict:
self.enable_tiling(_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : str = True
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __UpperCAmelCase ( self ) -> Dict[str, AttentionProcessor]:
UpperCAmelCase_ : Optional[int] = {}
def fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
UpperCAmelCase_ : Optional[int] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return processors
def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = len(self.attn_processors.keys() )
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(_UpperCamelCase )} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
module.set_processor(_UpperCamelCase )
else:
module.set_processor(processor.pop(f"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_UpperCamelCase , return_dict=_UpperCamelCase )
if self.use_slicing and x.shape[0] > 1:
UpperCAmelCase_ : Union[str, Any] = [self.encoder(_UpperCamelCase ) for x_slice in x.split(1 )]
UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : List[Any] = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self.quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_UpperCamelCase , return_dict=_UpperCamelCase )
UpperCAmelCase_ : str = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.decoder(_UpperCamelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
UpperCAmelCase_ : List[str] = [self._decode(_UpperCamelCase ).sample for z_slice in z.split(1 )]
UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : Any = self._decode(_UpperCamelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : Tuple = min(a.shape[2] , b.shape[2] , _UpperCamelCase )
for y in range(_UpperCamelCase ):
UpperCAmelCase_ : str = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Tuple = min(a.shape[3] , b.shape[3] , _UpperCamelCase )
for x in range(_UpperCamelCase ):
UpperCAmelCase_ : int = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
UpperCAmelCase_ : Any = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Tuple = int(self.tile_latent_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Optional[int] = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
UpperCAmelCase_ : List[str] = []
for i in range(0 , x.shape[2] , _UpperCamelCase ):
UpperCAmelCase_ : Any = []
for j in range(0 , x.shape[3] , _UpperCamelCase ):
UpperCAmelCase_ : Any = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
UpperCAmelCase_ : Dict = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.quant_conv(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
UpperCAmelCase_ : str = []
for i, row in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[Any] = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Dict = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase )
if j > 0:
UpperCAmelCase_ : List[str] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) )
UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=2 )
UpperCAmelCase_ : List[Any] = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : str = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Dict = int(self.tile_sample_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Dict = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
UpperCAmelCase_ : Union[str, Any] = []
for i in range(0 , z.shape[2] , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = []
for j in range(0 , z.shape[3] , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
UpperCAmelCase_ : Optional[Any] = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = self.decoder(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = []
for i, row in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[Any] = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Union[str, Any] = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase )
if j > 0:
UpperCAmelCase_ : Optional[Any] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) )
UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : Optional[Any] = sample
UpperCAmelCase_ : Union[str, Any] = self.encode(_UpperCamelCase ).latent_dist
if sample_posterior:
UpperCAmelCase_ : str = posterior.sample(generator=_UpperCamelCase )
else:
UpperCAmelCase_ : int = posterior.mode()
UpperCAmelCase_ : Dict = self.decode(_UpperCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
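# Hedged usage sketch (not part of the sample above): the tiled encode/decode
# methods match diffusers' AutoencoderKL. Assuming that class, tiling is
# exercised on large inputs roughly as:
#   import torch
#   from diffusers import AutoencoderKL
#   vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
#   vae.enable_tiling()
#   with torch.no_grad():
#       posterior = vae.encode(torch.randn(1, 3, 1024, 1024)).latent_dist
#       image = vae.decode(posterior.sample()).sample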
| 29 | 0 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match( qs , ks ):
    '''simple docstring'''
    qts = tuple(re.compile(x + """$""" ) for x in qs )
    for i in range(len(ks ) - len(qs ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts , ks[i:] )]
        if matches and all(matches ):
            return True
    return False
def _replacement_rules( rules ):
    '''simple docstring'''
    def replace( key , val ):
        for rule, replacement in rules:
            if _match(rule , key ):
                return replacement
        return val
    return replace
def _get_partition_rules( ):
    '''simple docstring'''
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("""mp""" , None )),
        (("transformer", "wte", "embedding"), P("""mp""" , None )),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None , """mp""" )),
        (("attention", "out_proj", "kernel"), P("""mp""" , None )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None , """mp""" )),
        (("mlp", "c_fc", "bias"), P("""mp""" )),
        (("mlp", "c_proj", "kernel"), P("""mp""" , None )),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions( in_dict ):
    '''simple docstring'''
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
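# Hedged toy demo (not in the original sample): exercises the helpers above on
# a tiny parameter tree whose keys match the rules, so the completeness assert
# passes. numpy is an extra import; flax/jax come from the imports above.
import numpy as np

_toy_params = {
    "transformer": {
        "wte": {"embedding": np.zeros((4, 2))},
        "ln_f": {"bias": np.zeros(2), "scale": np.ones(2)},
    }
}
print(set_partitions(_toy_params))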
| 252 |
def lowercase__ ( a : int , b : int ):
    '''simple docstring'''
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == '1' and char_b == '1' ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
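    # Hedged usage examples (not in the original sample); the helper mirrors
    # bin(a & b) for non-negative integers:
    assert lowercase__(37, 50) == "0b100000"  # 37 & 50 == 32
    assert lowercase__(25, 32) == "0b000000"  # disjoint bit patterns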
| 29 | 0 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs( graph : dict ) -> bool:
    visited = [False] * len(graph )
    color = [-1] * len(graph )

    def dfs( v : int , c : int ) -> None:
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u , 1 - c )

    for i in range(len(graph ) ):
        if not visited[i]:
            dfs(i , 0 )

    for i in range(len(graph ) ):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
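# Hedged extra check (not in the original sample): an odd cycle cannot be
# 2-colored, so the check above should reject it.
odd_cycle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(odd_cycle))  # expected: False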
| 276 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'vocab.txt'}
__UpperCAmelCase = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__UpperCAmelCase = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__UpperCAmelCase = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = VOCAB_FILES_NAMES
_snake_case : int = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Dict = PRETRAINED_INIT_CONFIGURATION
_snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Any = ConvBertTokenizer
def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase="[UNK]" , _UpperCamelCase="[SEP]" , _UpperCamelCase="[PAD]" , _UpperCamelCase="[CLS]" , _UpperCamelCase="[MASK]" , _UpperCamelCase=True , _UpperCamelCase=None , **_UpperCamelCase , ) -> Dict:
super().__init__(
_UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , )
UpperCAmelCase_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _UpperCamelCase ) != tokenize_chinese_chars
):
UpperCAmelCase_ : Any = getattr(_UpperCamelCase , normalizer_state.pop('type' ) )
UpperCAmelCase_ : str = do_lower_case
UpperCAmelCase_ : List[Any] = strip_accents
UpperCAmelCase_ : str = tokenize_chinese_chars
UpperCAmelCase_ : Tuple = normalizer_class(**_UpperCamelCase )
UpperCAmelCase_ : Any = do_lower_case
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=None ) -> List[str]:
UpperCAmelCase_ : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]:
UpperCAmelCase_ : Union[str, Any] = [self.sep_token_id]
UpperCAmelCase_ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]:
UpperCAmelCase_ : Any = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
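# Hedged usage sketch: the class above mirrors ConvBertTokenizerFast. The
# obfuscated signatures here do not parse as-is, so the sketch targets the
# real transformers API (requires Hub access):
#   from transformers import ConvBertTokenizerFast
#   tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   print(tokenizer("Hello world")["input_ids"])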
| 29 | 0 |
'''simple docstring'''
def euclidean_distance_sqr( point_a , point_b ):
    """simple docstring"""
    return (point_a[0] - point_b[0]) ** 2 + (point_a[1] - point_b[1]) ** 2
def column_based_sort( array , column=0 ):
    """simple docstring"""
    return sorted(array , key=lambda point : point[column] )
def dis_between_closest_pair( points , points_counts , min_dis=float('''inf''' ) ):
    """simple docstring"""
    for i in range(points_counts - 1 ):
        for j in range(i + 1 , points_counts ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def dis_between_closest_in_strip( points , points_counts , min_dis=float('''inf''' ) ):
    """simple docstring"""
    for i in range(min(6 , points_counts - 1 ) , points_counts ):
        for j in range(max(0 , i - 6 ) , i ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def closest_pair_of_points_sqr( points_sorted_on_x , points_sorted_on_y , points_counts ):
    """simple docstring"""
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x , points_counts )
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[:mid] , mid )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y , points_sorted_on_y[mid:] , points_counts - mid )
    closest_pair_dis = min(closest_in_left , closest_in_right )
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
            cross_strip.append(point )
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip , len(cross_strip ) , closest_pair_dis )
    return min(closest_pair_dis , closest_in_strip )
def closest_pair_of_points( points , points_counts ):
    """simple docstring"""
    points_sorted_on_x = column_based_sort(points , column=0 )
    points_sorted_on_y = column_based_sort(points , column=1 )
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x , points_sorted_on_y , points_counts )
    ) ** 0.5
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print('''Distance:''', closest_pair_of_points(points, len(points)))
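    # Hedged extra check (not in the original sample): the nearest pair above
    # is (2, 3)-(3, 4), so the reported distance should be sqrt(2).
    assert abs(closest_pair_of_points(points, len(points)) - 2 ** 0.5) < 1e-9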
| 104 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = '''efficientformer'''
def __init__( self , _UpperCamelCase = [3, 2, 6, 4] , _UpperCamelCase = [4_8, 9_6, 2_2_4, 4_4_8] , _UpperCamelCase = [True, True, True, True] , _UpperCamelCase = 4_4_8 , _UpperCamelCase = 3_2 , _UpperCamelCase = 4 , _UpperCamelCase = 7 , _UpperCamelCase = 5 , _UpperCamelCase = 8 , _UpperCamelCase = 4 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1_6 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 2 , _UpperCamelCase = 1 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1 , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1E-5 , _UpperCamelCase = "gelu" , _UpperCamelCase = 0.02 , _UpperCamelCase = 1E-12 , _UpperCamelCase = 2_2_4 , _UpperCamelCase = 1E-05 , **_UpperCamelCase , ) -> None:
super().__init__(**_UpperCamelCase )
UpperCAmelCase_ : int = hidden_act
UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase_ : Tuple = hidden_sizes
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : List[str] = num_attention_heads
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : List[str] = patch_size
UpperCAmelCase_ : Union[str, Any] = num_channels
UpperCAmelCase_ : Optional[Any] = depths
UpperCAmelCase_ : List[Any] = mlp_expansion_ratio
UpperCAmelCase_ : List[str] = downsamples
UpperCAmelCase_ : List[Any] = dim
UpperCAmelCase_ : Tuple = key_dim
UpperCAmelCase_ : Optional[int] = attention_ratio
UpperCAmelCase_ : str = resolution
UpperCAmelCase_ : Dict = pool_size
UpperCAmelCase_ : Union[str, Any] = downsample_patch_size
UpperCAmelCase_ : List[str] = downsample_stride
UpperCAmelCase_ : List[str] = downsample_pad
UpperCAmelCase_ : Any = drop_path_rate
UpperCAmelCase_ : Dict = num_metaad_blocks
UpperCAmelCase_ : Dict = distillation
UpperCAmelCase_ : int = use_layer_scale
UpperCAmelCase_ : Any = layer_scale_init_value
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : Dict = batch_norm_eps
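# Hedged usage sketch: the class above corresponds to EfficientFormerConfig.
# The obfuscated signature is not importable as-is, so the sketch targets the
# real transformers API:
#   from transformers import EfficientFormerConfig, EfficientFormerModel
#   config = EfficientFormerConfig(hidden_sizes=[48, 96, 224, 448])
#   model = EfficientFormerModel(config)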
| 29 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
class _snake_case ( _snake_case ):
lowerCAmelCase_ : Dict = ['''pixel_values''']
def __init__( self , a__ = True , a__ = None , a__ = PILImageResampling.BICUBIC , a__ = True , a__ = 1 / 255 , a__ = True , a__ = None , a__ = None , a__ = True , **a__ , ) -> None:
'''simple docstring'''
super().__init__(**_UpperCamelCase )
snake_case_ = size if size is not None else {'height': 384, 'width': 384}
snake_case_ = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase )
snake_case_ = do_resize
snake_case_ = size
snake_case_ = resample
snake_case_ = do_rescale
snake_case_ = rescale_factor
snake_case_ = do_normalize
snake_case_ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
snake_case_ = image_std if image_std is not None else OPENAI_CLIP_STD
snake_case_ = do_convert_rgb
def lowerCAmelCase__ ( self , a__ , a__ , a__ = PILImageResampling.BICUBIC , a__ = None , **a__ , ) -> np.ndarray:
'''simple docstring'''
snake_case_ = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' )
snake_case_ = (size['height'], size['width'])
return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def lowerCAmelCase__ ( self , a__ , a__ , a__ = None , **a__ , ) -> Dict:
'''simple docstring'''
return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ = None , **a__ , ) -> np.ndarray:
'''simple docstring'''
return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def lowerCAmelCase__ ( self , a__ , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = ChannelDimension.FIRST , **a__ , ) -> PIL.Image.Image:
'''simple docstring'''
snake_case_ = do_resize if do_resize is not None else self.do_resize
snake_case_ = resample if resample is not None else self.resample
snake_case_ = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ = image_mean if image_mean is not None else self.image_mean
snake_case_ = image_std if image_std is not None else self.image_std
snake_case_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
snake_case_ = size if size is not None else self.size
snake_case_ = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase )
snake_case_ = make_list_of_images(_UpperCamelCase )
if not valid_images(_UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
snake_case_ = [convert_to_rgb(_UpperCamelCase ) for image in images]
# All transformations expect numpy arrays.
snake_case_ = [to_numpy_array(_UpperCamelCase ) for image in images]
if do_resize:
snake_case_ = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images]
if do_rescale:
snake_case_ = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images]
if do_normalize:
snake_case_ = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images]
snake_case_ = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images]
snake_case_ = BatchFeature(data={"pixel_values": images} , tensor_type=_UpperCamelCase )
return encoded_outputs
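# Hedged usage sketch: the class above mirrors BlipImageProcessor. Assuming
# Pillow is installed, a single image is preprocessed roughly as:
#   import numpy as np
#   from PIL import Image
#   from transformers import BlipImageProcessor
#   processor = BlipImageProcessor(size={"height": 384, "width": 384})
#   image = Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))
#   batch = processor(images=image, return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 384, 384)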
| 85 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Union[PIL.Image.Image, np.ndarray]
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Any:
super().__init__()
self.register_modules(
prior=_UpperCamelCase , image_encoder=_UpperCamelCase , image_processor=_UpperCamelCase , scheduler=_UpperCamelCase , renderer=_UpperCamelCase , )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
if latents is None:
UpperCAmelCase_ : str = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
UpperCAmelCase_ : Tuple = latents.to(_UpperCamelCase )
UpperCAmelCase_ : Tuple = latents * scheduler.init_noise_sigma
return latents
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase_ : int = torch.device(f"cuda:{gpu_id}" )
UpperCAmelCase_ : int = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCamelCase , _UpperCamelCase )
@property
def __UpperCAmelCase ( self ) -> int:
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_UpperCamelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> str:
if isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , torch.Tensor ):
UpperCAmelCase_ : int = torch.cat(_UpperCamelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(_UpperCamelCase , axis=0 )
if not isinstance(_UpperCamelCase , torch.Tensor ):
UpperCAmelCase_ : Optional[int] = self.image_processor(_UpperCamelCase , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
UpperCAmelCase_ : Tuple = image.to(dtype=self.image_encoder.dtype , device=_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self.image_encoder(_UpperCamelCase )['last_hidden_state']
UpperCAmelCase_ : Union[str, Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
UpperCAmelCase_ : List[str] = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : Dict = torch.zeros_like(_UpperCamelCase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase_ : Optional[int] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase = 1 , _UpperCamelCase = 2_5 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 4.0 , _UpperCamelCase = 6_4 , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> Union[str, Any]:
if isinstance(_UpperCamelCase , PIL.Image.Image ):
UpperCAmelCase_ : Tuple = 1
elif isinstance(_UpperCamelCase , torch.Tensor ):
UpperCAmelCase_ : str = image.shape[0]
elif isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
UpperCAmelCase_ : Optional[int] = len(_UpperCamelCase )
else:
raise ValueError(
f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_UpperCamelCase )}" )
UpperCAmelCase_ : Tuple = self._execution_device
UpperCAmelCase_ : str = batch_size * num_images_per_prompt
UpperCAmelCase_ : str = guidance_scale > 1.0
UpperCAmelCase_ : str = self._encode_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# prior
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
UpperCAmelCase_ : int = self.scheduler.timesteps
UpperCAmelCase_ : int = self.prior.config.num_embeddings
UpperCAmelCase_ : Any = self.prior.config.embedding_dim
UpperCAmelCase_ : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
UpperCAmelCase_ : List[Any] = latents.reshape(latents.shape[0] , _UpperCamelCase , _UpperCamelCase )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : Optional[Any] = self.scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : int = self.prior(
_UpperCamelCase , timestep=_UpperCamelCase , proj_embedding=_UpperCamelCase , ).predicted_image_embedding
# remove the variance
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 )
UpperCAmelCase_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
UpperCAmelCase_ : List[str] = self.scheduler.step(
_UpperCamelCase , timestep=_UpperCamelCase , sample=_UpperCamelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = []
for i, latent in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[str] = self.renderer.decode(
latent[None, :] , _UpperCamelCase , size=_UpperCamelCase , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , )
images.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = torch.stack(_UpperCamelCase )
if output_type not in ["np", "pil"]:
raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}" )
UpperCAmelCase_ : Dict = images.cpu().numpy()
if output_type == "pil":
UpperCAmelCase_ : List[str] = [self.numpy_to_pil(_UpperCamelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_UpperCamelCase )
| 29 | 0 |
'''simple docstring'''
from string import ascii_uppercase

dicta = {char: i for i, char in enumerate(ascii_uppercase)}
dicta_rev = dict(enumerate(ascii_uppercase))
def generate_key( message : str , key : str ) -> str:
    x = len(message )
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key ) == len(message ):
            break
        key += key[i]
        i += 1
    return key
def cipher_text( message : str , key_new : str ) -> str:
    encrypted = ''
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (dicta[letter] - dicta[key_new[i]]) % 26
            i += 1
            encrypted += dicta_rev[x]
    return encrypted
def original_text( cipher_text : str , key_new : str ) -> str:
    or_txt = ''
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dicta[letter] + dicta[key_new[i]] + 26) % 26
            i += 1
            or_txt += dicta_rev[x]
    return or_txt
def main() -> None:
    message = 'THE GERMAN ATTACK'
    key = 'SECRET'
    key_new = generate_key(message, key )
    s = cipher_text(message, key_new )
    print(f"""Encrypted Text = {s}""" )
    print(f"""Original Text = {original_text(s, key_new )}""" )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 145 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase (_snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : Union[str, Any] = IFImgaImgSuperResolutionPipeline
_snake_case : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
_snake_case : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
_snake_case : List[str] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __UpperCAmelCase ( self ) -> Optional[Any]:
return self._get_superresolution_dummy_components()
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=0 ) -> Any:
if str(_UpperCamelCase ).startswith('mps' ):
UpperCAmelCase_ : List[Any] = torch.manual_seed(_UpperCamelCase )
else:
UpperCAmelCase_ : int = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
UpperCAmelCase_ : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
UpperCAmelCase_ : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCAmelCase ( self ) -> Any:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __UpperCAmelCase ( self ) -> Dict:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __UpperCAmelCase ( self ) -> str:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __UpperCAmelCase ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self._test_save_load_local()
def __UpperCAmelCase ( self ) -> Dict:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
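# Hedged usage note (the path below is an assumption, not from this sample):
# the mixin tests above are collected by pytest, e.g.
#   pytest -k "IFImgaImgSuperResolution" tests/pipelines/deepfloyd_if/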
| 29 | 0 |
"""simple docstring"""
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class _lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
snake_case : List[str] = logging.get_logger()
# the current default level is logging.WARNING
snake_case : str = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(_UpperCamelCase )
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
snake_case : Dict = logging.get_verbosity()
snake_case : List[str] = logging.get_logger("transformers.models.bart.tokenization_bart" )
snake_case : Dict = 'Testing 1, 2, 3'
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(_UpperCamelCase ) as cl:
logger.warning(_UpperCamelCase )
self.assertEqual(cl.out , msg + "\n" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(_UpperCamelCase ) as cl:
logger.warning(_UpperCamelCase )
self.assertEqual(cl.out , "" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(_UpperCamelCase ) as cl:
logger.warning(_UpperCamelCase )
self.assertEqual(cl.out , msg + "\n" )
# restore to the original level
logging.set_verbosity(_UpperCamelCase )
@mockenv(TRANSFORMERS_VERBOSITY="error" )
def lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
snake_case : List[str] = logging.get_logger("transformers.models.bart.tokenization_bart" )
snake_case : Tuple = os.getenv("TRANSFORMERS_VERBOSITY" , _UpperCamelCase )
snake_case : List[str] = logging.log_levels[env_level_str]
snake_case : Dict = logging.get_verbosity()
self.assertEqual(
_UpperCamelCase , _UpperCamelCase , F'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}' , )
# restore to the original level
snake_case : Tuple = ''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="super-error" )
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
snake_case : Any = logging.logging.getLogger()
with CaptureLogger(_UpperCamelCase ) as cl:
# this action activates the env var
logging.get_logger("transformers.models.bart.tokenization_bart" )
self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error" , cl.out )
# no need to restore as nothing was changed
def lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
snake_case : Optional[Any] = logging.get_logger("transformers.models.bart.tokenization_bart" )
snake_case : Dict = 'Testing 1, 2, 3'
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ):
# nothing should be logged as env var disables this method
with CaptureLogger(_UpperCamelCase ) as cl:
logger.warning_advice(_UpperCamelCase )
self.assertEqual(cl.out , "" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(_UpperCamelCase ) as cl:
logger.warning_advice(_UpperCamelCase )
self.assertEqual(cl.out , msg + "\n" )
def __lowerCAmelCase ( ) -> List[Any]:
"""simple docstring"""
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
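# Hedged shell sketch (not part of the test module): the env vars exercised
# above are read when transformers is imported, so they are typically set
# before launching the interpreter, e.g.
#   TRANSFORMERS_VERBOSITY=error python train.py
#   TRANSFORMERS_NO_ADVISORY_WARNINGS=1 python train.py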
| 203 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29 | 0 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str]=0.9_99 , UpperCamelCase__ : str="cosine" , ) -> List[str]:
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(UpperCamelCase__ : Union[str, Any] ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(UpperCamelCase__ : Optional[int] ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
__lowerCamelCase = []
for i in range(__snake_case ):
__lowerCamelCase = i / num_diffusion_timesteps
__lowerCamelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__snake_case ) / alpha_bar_fn(__snake_case ) , __snake_case ) )
return torch.tensor(__snake_case , dtype=torch.floataa )
class __lowerCAmelCase ( _snake_case , _snake_case ):
"""simple docstring"""
snake_case_ = [e.name for e in KarrasDiffusionSchedulers]
snake_case_ = 2
@register_to_config
def __init__( self , lowerCamelCase__ = 1_000 , lowerCamelCase__ = 0.0_00_85 , lowerCamelCase__ = 0.0_12 , lowerCamelCase__ = "linear" , lowerCamelCase__ = None , lowerCamelCase__ = "epsilon" , lowerCamelCase__ = "linspace" , lowerCamelCase__ = 0 , ) -> Optional[Any]:
'''simple docstring'''
if trained_betas is not None:
__lowerCamelCase = torch.tensor(_UpperCamelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
__lowerCamelCase = torch.linspace(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__lowerCamelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _UpperCamelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__lowerCamelCase = betas_for_alpha_bar(_UpperCamelCase )
else:
raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" )
__lowerCamelCase = 1.0 - self.betas
__lowerCamelCase = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=None ) -> List[Any]:
'''simple docstring'''
if schedule_timesteps is None:
__lowerCamelCase = self.timesteps
__lowerCamelCase = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__lowerCamelCase = 1 if len(_UpperCamelCase ) > 1 else 0
else:
__lowerCamelCase = timestep.cpu().item() if torch.is_tensor(_UpperCamelCase ) else timestep
__lowerCamelCase = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , ) -> torch.FloatTensor:
'''simple docstring'''
__lowerCamelCase = self.index_for_timestep(_UpperCamelCase )
if self.state_in_first_order:
__lowerCamelCase = self.sigmas[step_index]
else:
__lowerCamelCase = self.sigmas_interpol[step_index]
__lowerCamelCase = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = num_inference_steps
__lowerCamelCase = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__lowerCamelCase = np.linspace(0 , num_train_timesteps - 1 , _UpperCamelCase , dtype=_UpperCamelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__lowerCamelCase = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowerCamelCase = (np.arange(0 , _UpperCamelCase ) * step_ratio).round()[::-1].copy().astype(_UpperCamelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__lowerCamelCase = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowerCamelCase = (np.arange(_UpperCamelCase , 0 , -step_ratio )).round().copy().astype(_UpperCamelCase )
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
__lowerCamelCase = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__lowerCamelCase = torch.from_numpy(np.log(_UpperCamelCase ) ).to(_UpperCamelCase )
__lowerCamelCase = np.interp(_UpperCamelCase , np.arange(0 , len(_UpperCamelCase ) ) , _UpperCamelCase )
__lowerCamelCase = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__lowerCamelCase = torch.from_numpy(_UpperCamelCase ).to(device=_UpperCamelCase )
# interpolate sigmas
__lowerCamelCase = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__lowerCamelCase = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__lowerCamelCase = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(_UpperCamelCase ).startswith('mps' ):
# mps does not support float64
__lowerCamelCase = torch.from_numpy(_UpperCamelCase ).to(_UpperCamelCase , dtype=torch.floataa )
else:
__lowerCamelCase = torch.from_numpy(_UpperCamelCase ).to(_UpperCamelCase )
# interpolate timesteps
__lowerCamelCase = self.sigma_to_t(_UpperCamelCase ).to(_UpperCamelCase , dtype=timesteps.dtype )
__lowerCamelCase = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__lowerCamelCase = torch.cat([timesteps[:1], interleaved_timesteps] )
__lowerCamelCase = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__lowerCamelCase = defaultdict(_UpperCamelCase )
def lowercase_ ( self , lowerCamelCase__ ) -> str:
'''simple docstring'''
# get log sigma
__lowerCamelCase = sigma.log()
# get distribution
__lowerCamelCase = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__lowerCamelCase = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__lowerCamelCase = low_idx + 1
__lowerCamelCase = self.log_sigmas[low_idx]
__lowerCamelCase = self.log_sigmas[high_idx]
# interpolate sigmas
__lowerCamelCase = (low - log_sigma) / (low - high)
__lowerCamelCase = w.clamp(0 , 1 )
# transform interpolation to time range
__lowerCamelCase = (1 - w) * low_idx + w * high_idx
__lowerCamelCase = t.view(sigma.shape )
return t
@property
def lowercase_ ( self ) -> int:
'''simple docstring'''
return self.sample is None
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = True , ) -> Union[SchedulerOutput, Tuple]:
'''simple docstring'''
__lowerCamelCase = self.index_for_timestep(_UpperCamelCase )
# advance index counter by 1
__lowerCamelCase = timestep.cpu().item() if torch.is_tensor(_UpperCamelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__lowerCamelCase = self.sigmas[step_index]
__lowerCamelCase = self.sigmas_interpol[step_index + 1]
__lowerCamelCase = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__lowerCamelCase = self.sigmas[step_index - 1]
__lowerCamelCase = self.sigmas_interpol[step_index]
__lowerCamelCase = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__lowerCamelCase = 0
__lowerCamelCase = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__lowerCamelCase = sigma_hat if self.state_in_first_order else sigma_interpol
__lowerCamelCase = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__lowerCamelCase = sigma_hat if self.state_in_first_order else sigma_interpol
__lowerCamelCase = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('prediction_type not implemented yet: sample' )
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__lowerCamelCase = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__lowerCamelCase = sigma_interpol - sigma_hat
# store for 2nd order step
__lowerCamelCase = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__lowerCamelCase = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__lowerCamelCase = sigma_next - sigma_hat
__lowerCamelCase = self.sample
__lowerCamelCase = None
__lowerCamelCase = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_UpperCamelCase )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> torch.FloatTensor:
'''simple docstring'''
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__lowerCamelCase = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_UpperCamelCase ):
# mps does not support float64
__lowerCamelCase = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__lowerCamelCase = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__lowerCamelCase = self.timesteps.to(original_samples.device )
__lowerCamelCase = timesteps.to(original_samples.device )
__lowerCamelCase = [self.index_for_timestep(_UpperCamelCase , _UpperCamelCase ) for t in timesteps]
__lowerCamelCase = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__lowerCamelCase = sigma.unsqueeze(-1 )
__lowerCamelCase = original_samples + noise * sigma
return noisy_samples
def __len__( self ) -> Union[str, Any]:
'''simple docstring'''
return self.config.num_train_timesteps
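# Hedged usage sketch: the class above corresponds to diffusers'
# KDPM2DiscreteScheduler. A bare denoising loop (UNet call stubbed out) would
# look roughly like:
#   import torch
#   from diffusers import KDPM2DiscreteScheduler
#   scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(25)
#   sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model_input  # placeholder for a real UNet prediction
#       sample = scheduler.step(noise_pred, t, sample).prev_sample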
| 90 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'tensor(bool)': np.bool_,
'tensor(int8)': np.inta,
'tensor(uint8)': np.uinta,
'tensor(int16)': np.intaa,
'tensor(uint16)': np.uintaa,
'tensor(int32)': np.intaa,
'tensor(uint32)': np.uintaa,
'tensor(int64)': np.intaa,
'tensor(uint64)': np.uintaa,
'tensor(float16)': np.floataa,
'tensor(float)': np.floataa,
'tensor(double)': np.floataa,
}
class lowerCamelCase :
'''simple docstring'''
def __init__( self , _UpperCamelCase=None , **_UpperCamelCase ) -> Dict:
logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
UpperCAmelCase_ : Any = model
UpperCAmelCase_ : int = kwargs.get('model_save_dir' , _UpperCamelCase )
UpperCAmelCase_ : List[Any] = kwargs.get('latest_model_name' , _UpperCamelCase )
def __call__( self , **_UpperCamelCase ) -> str:
UpperCAmelCase_ : Optional[int] = {k: np.array(_UpperCamelCase ) for k, v in kwargs.items()}
return self.model.run(_UpperCamelCase , _UpperCamelCase )
@staticmethod
def __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None ) -> List[Any]:
if provider is None:
logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
UpperCAmelCase_ : List[str] = 'CPUExecutionProvider'
return ort.InferenceSession(_UpperCamelCase , providers=[provider] , sess_options=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Any = file_name if file_name is not None else ONNX_WEIGHTS_NAME
UpperCAmelCase_ : Optional[Any] = self.model_save_dir.joinpath(self.latest_model_name )
UpperCAmelCase_ : str = Path(_UpperCamelCase ).joinpath(_UpperCamelCase )
try:
shutil.copyfile(_UpperCamelCase , _UpperCamelCase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
UpperCAmelCase_ : Optional[Any] = self.model_save_dir.joinpath(_UpperCamelCase )
if src_path.exists():
UpperCAmelCase_ : List[Any] = Path(_UpperCamelCase ).joinpath(_UpperCamelCase )
try:
shutil.copyfile(_UpperCamelCase , _UpperCamelCase )
except shutil.SameFileError:
pass
def __UpperCAmelCase ( self , _UpperCamelCase , **_UpperCamelCase , ) -> List[str]:
if os.path.isfile(_UpperCamelCase ):
logger.error(f"Provided path ({save_directory}) should be a directory, not a file" )
return
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
# saving model weights/files
self._save_pretrained(_UpperCamelCase , **_UpperCamelCase )
@classmethod
def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> List[str]:
UpperCAmelCase_ : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(_UpperCamelCase ):
UpperCAmelCase_ : Union[str, Any] = OnnxRuntimeModel.load_model(
os.path.join(_UpperCamelCase , _UpperCamelCase ) , provider=_UpperCamelCase , sess_options=_UpperCamelCase )
UpperCAmelCase_ : Tuple = Path(_UpperCamelCase )
# load model from hub
else:
# download model
UpperCAmelCase_ : List[str] = hf_hub_download(
repo_id=_UpperCamelCase , filename=_UpperCamelCase , use_auth_token=_UpperCamelCase , revision=_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , )
UpperCAmelCase_ : Union[str, Any] = Path(_UpperCamelCase ).parent
UpperCAmelCase_ : List[str] = Path(_UpperCamelCase ).name
UpperCAmelCase_ : Union[str, Any] = OnnxRuntimeModel.load_model(_UpperCamelCase , provider=_UpperCamelCase , sess_options=_UpperCamelCase )
return cls(model=_UpperCamelCase , **_UpperCamelCase )
@classmethod
def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> Optional[int]:
UpperCAmelCase_ : List[str] = None
if len(str(_UpperCamelCase ).split('@' ) ) == 2:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = model_id.split('@' )
return cls._from_pretrained(
model_id=_UpperCamelCase , revision=_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , use_auth_token=_UpperCamelCase , **_UpperCamelCase , )
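# Hedged usage sketch: the wrapper above corresponds to diffusers'
# OnnxRuntimeModel. Assuming a local directory containing "model.onnx":
#   import numpy as np
#   model = OnnxRuntimeModel.from_pretrained("path/to/onnx_dir", file_name="model.onnx")
#   outputs = model(input_ids=np.zeros((1, 8), dtype=np.int64))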
| 29 | 0 |
"""simple docstring"""
def lowercase (string : str , separator : str = " " ) -> list:
    '''simple docstring'''
    split_words = []
    last_index = 0
    for index, char in enumerate(string ):
        if char == separator:
            split_words.append(string[last_index:index] )
            last_index = index + 1
        elif index + 1 == len(string ):
            split_words.append(string[last_index : index + 1] )
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
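    # Hedged usage examples (not in the original sample); mirrors str.split
    # for simple inputs:
    assert lowercase("innovation in technology") == ["innovation", "in", "technology"]
    assert lowercase("a-b-c", "-") == ["a", "b", "c"]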
| 155 |
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = 10
UpperCAmelCase_ : Tuple = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
UpperCAmelCase_ : Tuple = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
            'answers': [{'answer_start': [97], 'text': ['1976']}] * n,
'id': list(range(__snake_case ) ),
} , features=__snake_case , )
return dataset
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : str = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=__snake_case )
return filename
# FILE_CONTENT + files
__UpperCAmelCase = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt'
UpperCAmelCase_ : Tuple = FILE_CONTENT
with open(__snake_case , 'w' ) as f:
f.write(__snake_case )
return filename
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
import bza
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
UpperCAmelCase_ : str = bytes(__snake_case , 'utf-8' )
with bza.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
UpperCAmelCase_ : Dict = bytes(__snake_case , 'utf-8' )
with gzip.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
import lza.frame
UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' )
with lza.frame.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : List[Any] ):
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with pyazr.SevenZipFile(__snake_case , 'w' ) as archive:
archive.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] , __snake_case : List[Any] ):
'''simple docstring'''
import tarfile
UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
import lzma
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' )
with lzma.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[int] , __snake_case : Optional[Any] ):
'''simple docstring'''
import zipfile
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
UpperCAmelCase_ : Tuple = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
UpperCAmelCase_ : List[str] = bytes(__snake_case , 'utf-8' )
with zstd.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.xml'
UpperCAmelCase_ : List[Any] = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(__snake_case , 'w' ) as f:
f.write(__snake_case )
return filename
__UpperCAmelCase = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
__UpperCAmelCase = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
__UpperCAmelCase = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
__UpperCAmelCase = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
__UpperCAmelCase = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session')
def arrow_path(tmp_path_factory):
    '''simple docstring'''
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp('data') / 'dataset.arrow')
    dataset.map(cache_file_name=path)
    return path
@pytest.fixture(scope='session')
def sqlite_path(tmp_path_factory):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('data') / 'dataset.sqlite')
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)')
        for item in DATA:
            cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)', tuple(item.values()))
        con.commit()
    return path
@pytest.fixture(scope='session')
def csv_path(tmp_path_factory):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('data') / 'dataset.csv')
    with open(path, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=['col_1', 'col_2', 'col_3'])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope='session')
def csv2_path(tmp_path_factory):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('data') / 'dataset2.csv')
    with open(path, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=['col_1', 'col_2', 'col_3'])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope='session')
def bz2_csv_path(csv_path, tmp_path_factory):
    '''simple docstring'''
    import bz2

    path = tmp_path_factory.mktemp('data') / 'dataset.csv.bz2'
    with open(csv_path, 'rb') as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, 'wb') as f:
        f.write(data)
    return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] , __snake_case : Tuple , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(__snake_case , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : int , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session')
def parquet_path(tmp_path_factory):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('data') / 'dataset.parquet')
    schema = pa.schema(
        {
            'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
        })
    with open(path, 'wb') as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase_ : Optional[int] = {'data': DATA}
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase_ : Tuple = {'data': DATA_DICT_OF_LISTS}
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Dict ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(__snake_case , 'rb' ) as orig_file:
with gzip.open(__snake_case , 'wb' ) as zipped_file:
zipped_file.writelines(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int , __snake_case : Any ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(__snake_case , 'rb' ) as orig_file:
with gzip.open(__snake_case , 'wb' ) as zipped_file:
zipped_file.writelines(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : str , __snake_case : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : str , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any , __snake_case : Any , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = ['0', '1', '2', '3']
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = ['0', '1', '2', '3']
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ['0', '1', '2', '3']
UpperCAmelCase_ : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : str , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename('unsupported.ext' ) )
f.write(__snake_case , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(__snake_case , 'w' , encoding='utf-8' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def data_dir_with_hidden_files(tmp_path_factory):
    '''simple docstring'''
    data_dir = tmp_path_factory.mktemp('data_dir')
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
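# A loader that honors hidden-file conventions should surface only the two
# visible files from the tree above, e.g. (a sketch):
#
#     from pathlib import Path
#
#     visible = [
#         p for p in Path(data_dir).rglob('*.txt')
#         if not any(part.startswith('.') for part in p.relative_to(data_dir).parts)
#     ]
#     assert sorted(p.name for p in visible) == ['test.txt', 'train.txt']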
| 29 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=_snake_case ):
'''simple docstring'''
_lowerCamelCase: List[Any] = ['''speech''']
def __init__( self : Optional[Any] ,*A_ : int ,**A_ : str ) -> Optional[Any]:
requires_backends(self ,['speech'] )
class lowerCAmelCase_ ( metaclass=_snake_case ):
'''simple docstring'''
_lowerCamelCase: int = ['''speech''']
def __init__( self : Tuple ,*A_ : int ,**A_ : str ) -> Any:
requires_backends(self ,['speech'] ) | 74 |
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    '''simple docstring'''
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    '''simple docstring'''
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    '''simple docstring'''
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    '''simple docstring'''
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = F"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
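# Illustrative usage (a sketch): 5 is the smallest board size above 1 that
# admits an open knight's tour, so open_knight_tour(5) returns a 5x5 board in
# which each cell holds the 1-based step at which the knight visited it, while
# open_knight_tour(4) raises ValueError.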
| 29 | 0 |
speed_chart = {
    'km/h': 1.0,
    'm/s': 3.6,
    'mph': 1.609_344,
    'knot': 1.852,
}

speed_chart_inverse = {
    'km/h': 1.0,
    'm/s': 0.277_777_778,
    'mph': 0.621_371_192,
    'knot': 0.539_956_803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    """simple docstring"""
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod()
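# Illustrative conversions (a sketch):
#
#     convert_speed(100, 'km/h', 'm/s')   # -> 27.778
#     convert_speed(100, 'mph', 'km/h')   # -> 160.934
#     convert_speed(100, 'knot', 'mph')   # -> 115.078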
| 142 |
def partition(m: int) -> int:
    '''simple docstring'''
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input('Enter a number: ').strip())
            print(partition(n))
        except ValueError:
            print('Please enter a number.')
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print('Please pass a number.')
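# Worked example: partition(5) == 7, matching the seven integer partitions of 5:
# 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1 and 1+1+1+1+1.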
| 29 | 0 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize, sigma, theta, lambd, gamma, psi):
    """simple docstring"""
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread('''../image_data/lena.jpg''')
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow('''Original''', gray)
    imshow('''Gabor filter with 20x20 mask and 6 directions''', out)
    waitKey(0)
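# Quick sanity check (a sketch): with psi = 0 the kernel attains its maximum
# value of 1.0 on the stripe through the center.
#
#     k = gabor_filter_kernel(9, 8, 0, 10, 0, 0)
#     assert k.shape == (9, 9)
#     assert k[4, 4] == k.max() == 1.0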
| 5 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    '''simple docstring'''

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params['padding'] = padding
        if truncation is not None:
            preprocess_params['truncation'] = truncation
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {'image': image, 'question': question}
        else:
            # Supports {"image": ..., "question": ...}, lists thereof, generators and datasets
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs['image'])
        model_inputs = self.tokenizer(
            inputs['question'], return_tensors=self.framework, padding=padding, truncation=truncation)
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 29 | 0 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    """simple docstring"""

    def _no_encoding_on_file_open(self, filepath: str):
        '''simple docstring'''
        with open(filepath, encoding="""utf-8""") as input_file:
            regexp = re.compile(r"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        '''simple docstring'''
        with open(filepath, encoding="""utf-8""") as input_file:
            regexp = re.compile(r"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            filtered_matches = [match for match in matches if match is not None and match.group(1) is not None]
        return filtered_matches[0] if filtered_matches else None

    def test_no_encoding_on_file_open(self):
        '''simple docstring'''
        dataset_paths = Path("""./datasets""")
        dataset_files = list(dataset_paths.absolute().glob("""**/*.py"""))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(F'open(...) must use utf-8 encoding in {dataset}')

    def test_no_print_statements(self):
        '''simple docstring'''
        dataset_paths = Path("""./datasets""")
        dataset_files = list(dataset_paths.absolute().glob("""**/*.py"""))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(F'print statement found in {dataset}. Use datasets.logger/logging instead.')
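# Quick sanity check for the print-detection pattern above (a sketch):
#
#     pattern = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
#     assert all(m.group(1) is None for m in pattern.finditer("# print('x')"))  # commented-out print: ignored
#     assert any(m.group(1) for m in pattern.finditer("print('x')"))            # real call: flagged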
| 252 |
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    '''simple docstring'''
    script_directory = os.path.dirname(os.path.realpath(__file__))
    wordlist_path = os.path.join(script_directory, 'words.txt')
    words = ''
    with open(wordlist_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip('\r\n').split(',')]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
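# Worked example: "SKY" scores 19 + 11 + 25 = 55, and 55 = 10 * 11 / 2 is the
# 10th triangular number, so "SKY" counts as a triangular word.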
if __name__ == "__main__":
print(solution())
| 29 | 0 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        '''simple docstring'''
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], """<unk>""")
        self.assertEqual(vocab_keys[1], """<s>""")
        self.assertEqual(vocab_keys[-1], """<eod>""")
        self.assertEqual(len(vocab_keys), 1_0_0_6)
    def test_vocab_size(self):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size, 1_0_0_0)
    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("""This is a test""")
        self.assertListEqual(tokens, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2])

        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_tokenizer_lower(self):
        '''simple docstring'''
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] )
    def test_tokenizer_no_lower(self):
        '''simple docstring'''
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
    @slow
    def test_sequence_builders(self):
        '''simple docstring'''
        tokenizer = XLNetTokenizer.from_pretrained("""xlnet-base-cased""")

        text = tokenizer.encode("""sequence builders""", add_special_tokens=False)
        text_2 = tokenizer.encode("""multi-sequence build""", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
    @slow
    def test_tokenizer_integration(self):
        '''simple docstring'''
        # fmt: off
_a : List[Any] ={'input_ids': [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_a, model_name="""xlnet-base-cased""", revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""", )
| 276 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec('s3fs') is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    '''simple docstring'''
    if "://" in dataset_path:
        dataset_path = dataset_path.split('://')[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    '''simple docstring'''
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    '''simple docstring'''
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock():
    '''simple docstring'''
    if hasattr(fsspec.asyn, 'reset_lock'):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
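# Illustrative behavior of the helpers above (a sketch):
#
#     extract_path_from_uri('s3://my-bucket/datasets/train')  # -> 'my-bucket/datasets/train'
#     extract_path_from_uri('/local/path/train')              # -> '/local/path/train'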
| 29 | 0 |
'''simple docstring'''
import math
import sys


def read_file_binary(file_path: str) -> str:
    """Reads the given file as bytes and returns them as one long bit string."""
    result = ''
    try:
        with open(file_path, '''rb''') as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = F"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print('''File not accessible''')
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompresses the given bit string using the Lempel-Ziv-Welch algorithm."""
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + '0'

        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex['0' + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + '1'
        index += 1
        curr_string = ''
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Writes the given bit string (only 0's and 1's) as bytes to the file."""
    byte_length = 8
    try:
        with open(file_path, '''wb''') as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append('''10000000''')
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder='''big'''))
    except OSError:
        print('''File not accessible''')
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Removes the size prefix that a compressed file carries."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[1:]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """Reads the source file, decompresses it and writes the result to the destination file."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
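# Illustrative CLI usage (a sketch; the paths are placeholders and the input
# must come from the matching Lempel-Ziv compressor):
#
#     python lempel_ziv_decompress.py compressed.lz restored.txt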
| 104 |
def cocktail_shaker_sort(unsorted: list) -> list:
    '''simple docstring'''
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(F'{cocktail_shaker_sort(unsorted) = }')
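# Example: cocktail_shaker_sort([4, 5, 2, 1, 2]) returns [1, 2, 2, 4, 5]; the
# backward pass bubbles the minimum toward the front and the forward pass
# bubbles the maximum toward the back on each iteration.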
| 29 | 0 |
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source: float, target: float) -> bool:
    '''simple docstring'''
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_dir):
    '''simple docstring'''
    args = _TestCommandArgs(dataset=dataset_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    readme_path = os.path.join(dataset_dir, "README.md")
    assert os.path.exists(readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_dir)
    expected_dataset_infos = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string" ) ),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
"langs": Sequence(Value("string" ) ),
"spans": Sequence(Value("string" ) ),
} ) , splits=[
{
"name": "train",
"num_bytes": 2_3_5_1_5_6_3,
"num_examples": 1_0_0_0_0,
},
{
"name": "validation",
"num_bytes": 2_3_8_4_1_8,
"num_examples": 1_0_0_0,
},
] , download_size=3_9_4_0_6_8_0 , dataset_size=2_5_8_9_9_8_1 , )
} )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
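# Example of the 1% tolerance used above: is_1percent_close(2_351_563, 2_360_000)
# is True because abs(2_351_563 - 2_360_000) / 2_360_000 ≈ 0.0036 < 0.01.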
| 85 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    '''simple docstring'''
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert('RGB'))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
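# Worked example for downscale_height_and_width: with the default scale_factor=8,
# 768 // 8**2 = 12 exactly, so a 768x768 image maps to a 96x96 latent grid; a
# non-multiple like 531 gives 531 // 64 = 8 with a remainder, so that axis
# rounds up to 9 * 8 = 72.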
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    '''simple docstring'''

    def __init__(self, unet, scheduler, movq):
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators.")
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to('cpu', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, '_hf_hook'):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 1_0_0 , _UpperCamelCase = 4.0 , _UpperCamelCase = 0.3 , _UpperCamelCase = 1 , _UpperCamelCase = None , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> str:
UpperCAmelCase_ : Any = self._execution_device
UpperCAmelCase_ : Union[str, Any] = guidance_scale > 1.0
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : str = torch.cat(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Optional[Any] = image_embeds.shape[0]
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : int = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : int = negative_image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_UpperCamelCase )
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Tuple = [image]
if not all(isinstance(_UpperCamelCase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"Input is in incorrect format: {[type(_UpperCamelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
UpperCAmelCase_ : str = torch.cat([prepare_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) for i in image] , dim=0 )
UpperCAmelCase_ : Any = image.to(dtype=image_embeds.dtype , device=_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.movq.encode(_UpperCamelCase )['latents']
UpperCAmelCase_ : List[Any] = latents.repeat_interleave(_UpperCamelCase , dim=0 )
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.get_timesteps(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
UpperCAmelCase_ , UpperCAmelCase_ : str = downscale_height_and_width(_UpperCamelCase , _UpperCamelCase , self.movq_scale_factor )
UpperCAmelCase_ : Dict = self.prepare_latents(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : str = {'image_embeds': image_embeds}
UpperCAmelCase_ : Union[str, Any] = self.unet(
sample=_UpperCamelCase , timestep=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , added_cond_kwargs=_UpperCamelCase , return_dict=_UpperCamelCase , )[0]
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 )
UpperCAmelCase_ , UpperCAmelCase_ : str = variance_pred.chunk(2 )
UpperCAmelCase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase_ , UpperCAmelCase_ : int = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : List[str] = self.scheduler.step(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase , )[0]
# post-processing
UpperCAmelCase_ : Optional[Any] = self.movq.decode(_UpperCamelCase , force_not_quantize=_UpperCamelCase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
UpperCAmelCase_ : List[str] = image * 0.5 + 0.5
UpperCAmelCase_ : List[Any] = image.clamp(0 , 1 )
UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase_ : List[Any] = self.numpy_to_pil(_UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCamelCase )
| 29 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 145 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    '''simple docstring'''
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
def skip(test_case):
    return unittest.skip('Test was skipped')(test_case)


def slow(test_case):
    # Slow tests are skipped by default; set RUN_SLOW=yes to run them.
    return unittest.skipUnless(_run_slow_tests, 'test is slow')(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), 'test requires only a CPU')(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), 'test requires a GPU')(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), 'test requires a XPU')(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), 'test requires a `mps` backend support in `torch`')(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), 'test requires the Hugging Face suite')(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), 'test requires the bitsandbytes library')(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), 'test requires TPU')(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, 'test requires a GPU')(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, 'test requires a XPU')(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, 'test requires multiple GPUs')(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, 'test requires multiple XPUs')(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), 'test requires safetensors')(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), 'test requires DeepSpeed')(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version('>=', '1.12.0'), 'test requires torch version >= 1.12.0')(test_case)


def require_torch_min_version(test_case=None, version=None):
    # Usable both as a plain decorator and as a decorator factory.
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version('>=', version), F"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), 'test requires Tensorboard')(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), 'test requires wandb')(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), 'test requires comet_ml')(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available, 'test requires at least one tracker to be available and for `comet_ml` to not be installed', )(test_case)
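# Usage sketch for the decorators above (hypothetical test case, not part of
# this module): they wrap standard unittest cases and can be stacked.
#
#     @require_cuda
#     @require_wandb
#     class MyTrackedTest(unittest.TestCase):
#         def test_logging(self):
#             ...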
class TempDirTestCase(unittest.TestCase):
    # Whether to wipe the temporary directory contents before each test
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob('**/*'):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
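# Intended use (sketch): in a distributed run, each rank passes its local
# tensor and the helper gathers every copy to verify they all match.
#
#     local = torch.tensor([1.0, 2.0])
#     assert are_the_same_tensors(local)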
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    # Forward each line of an async pipe to `callback` until EOF.
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    if echo:
        print('\nRunning: ', ' '.join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label='stdout:'))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label='stderr:'))),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            F"The combined stderr from workers follows:\n{stderr}")
    return result
class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, 'decode'):
                output = output.decode('utf-8')
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}") from e
| 29 | 0 |
"""simple docstring"""
def __lowerCAmelCase ( lowercase : List[str] , lowercase : str , lowercase : int , lowercase : Union[str, Any] , lowercase : Tuple , lowercase : str ) -> Dict:
"""simple docstring"""
if index == r:
for j in range(__snake_case ):
print(data[j] , end=" " )
print(" " )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
snake_case : Tuple = arr[i]
combination_util(__snake_case , __snake_case , __snake_case , index + 1 , __snake_case , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def __lowerCAmelCase ( lowercase : List[str] , lowercase : List[str] , lowercase : Dict ) -> str:
"""simple docstring"""
snake_case : Tuple = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(__snake_case , __snake_case , __snake_case , 0 , __snake_case , 0 )
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
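# Equivalent output via the standard library (illustrative sketch only):
#
#     from itertools import combinations
#     for combo in combinations([10, 20, 30, 40, 50], 3):
#         print(*combo)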
| 203 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__UpperCAmelCase = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
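# Sanity sketch (illustrative only): DummyModel is the scalar affine map
# x -> a * x + b, which the checkpointing tests below fit to synthetic data.
#
#     m = DummyModel()
#     x = torch.ones(4, 1)
#     assert torch.allclose(m(x), m.a * x + m.b)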
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase_ : Optional[int] = ProjectConfiguration(total_limit=1 , project_dir=_UpperCamelCase , automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : Dict = Accelerator(project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
    def test_can_resume_training_with_folder(self):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Optional[Any] = DummyModel()
UpperCAmelCase_ : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders()
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
UpperCAmelCase_ : Any = os.path.join(_UpperCamelCase , 'initial' )
accelerator.save_state(_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
UpperCAmelCase_ : Union[str, Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Union[str, Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Any = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : str = dummy_dataloaders()
UpperCAmelCase_ : Optional[Any] = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.load_state(_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[str] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Dict = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save everything
UpperCAmelCase_ : Union[str, Any] = os.path.join(_UpperCamelCase , 'checkpoint' )
accelerator.save_state(_UpperCamelCase )
# Load everything back in and make sure all states work
accelerator.load_state(_UpperCamelCase )
test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
    def test_can_resume_training(self):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dummy_dataloaders()
UpperCAmelCase_ : Any = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : str = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
UpperCAmelCase_ : Optional[Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Tuple = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase_ : Any = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase_ : Tuple = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) )
((UpperCAmelCase_) , (UpperCAmelCase_)) : str = model.a.item(), model.b.item()
UpperCAmelCase_ : List[Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_1' ) )
test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
    def test_invalid_registration(self):
UpperCAmelCase_ : Optional[Any] = torch.tensor([1, 2, 3] )
UpperCAmelCase_ : Any = torch.tensor([2, 3, 4] )
UpperCAmelCase_ : Union[str, Any] = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(net.parameters() )
UpperCAmelCase_ : Any = Accelerator()
with self.assertRaises(_UpperCamelCase ) as ve:
accelerator.register_for_checkpointing(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[int] = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
    def test_with_scheduler(self):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ : Dict = torch.optim.lr_scheduler.StepLR(_UpperCamelCase , step_size=1 , gamma=0.99 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders()
UpperCAmelCase_ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
UpperCAmelCase_ : Dict = scheduler.state_dict()
train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
self.assertNotEqual(_UpperCamelCase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) )
self.assertEqual(_UpperCamelCase , scheduler.state_dict() )
    def test_checkpoint_deletion(self):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Optional[int] = DummyModel()
UpperCAmelCase_ : Dict = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase , total_limit=2 )
# Train baseline
UpperCAmelCase_ : Optional[int] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ : str = accelerator.prepare(_UpperCamelCase )
            # Save 11 states; with total_limit=2 only the last two checkpoints survive:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
    def test_map_location(self):
        cmd = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    savedir = '/tmp/accelerate/state_checkpointing'
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1E-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group['params'][0].device
        break
assert param_device.type == accelerator.device.type
    model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
    for group in optimizer.param_groups:
        param_device = group['params'][0].device
        break
assert (
param_device.type == torch.device('cpu').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
    for group in optimizer.param_groups:
        param_device = group['params'][0].device
        break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
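# The guarded block above is what runs when test_map_location re-launches this
# file under torchrun; e.g. (file name illustrative):
#
#     torchrun --nproc_per_node=2 test_state_checkpointing.py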
| 29 | 0 |
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph (adjacency dict) contains a cycle."""
    # Keep track of all already visited nodes
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph)


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
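# Example (graphs are illustrative, not from the original module): the edge
# 2 -> 0 closes the cycle 0 -> 1 -> 2 -> 0.
#
#     assert check_cycle({0: [1], 1: [2], 2: [0]}) is True
#     assert check_cycle({0: [1, 2], 1: [2], 2: []}) is False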
| 90 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ImageGPTImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 29 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
a = logging.get_logger(__name__)
a = '▁'
a = {'vocab_file': 'sentencepiece.bpe.model'}
a = {
'vocab_file': {
'facebook/mbart-large-50-one-to-many-mmt': (
'https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'
),
}
}
a = {
'facebook/mbart-large-50-one-to-many-mmt': 1_0_2_4,
}
# fmt: off
a = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN', 'af_ZA', 'az_AZ', 'bn_IN', 'fa_IR', 'he_IL', 'hr_HR', 'id_ID', 'ka_GE', 'km_KH', 'mk_MK', 'ml_IN', 'mn_MN', 'mr_IN', 'pl_PL', 'ps_AF', 'pt_XX', 'sv_SE', 'sw_KE', 'ta_IN', 'te_IN', 'th_TH', 'tl_XX', 'uk_UA', 'ur_PK', 'xh_ZA', 'gl_ES', 'sl_SI']
class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file, src_lang=None, tgt_lang=None, eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._src_lang = src_lang if src_lang is not None else 'en_XX'
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += ' '
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts, src_lang: str = "en_XX", tgt_texts=None, tgt_lang: str = "ro_RO", **kwargs, ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting. prefix=[src_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting. prefix=[tgt_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
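# Usage sketch (assumes network access to the hub checkpoint):
#
#     tok = MBart50Tokenizer.from_pretrained(
#         'facebook/mbart-large-50-one-to-many-mmt', src_lang='en_XX', tgt_lang='ro_RO')
#     batch = tok('Hello world', return_tensors='pt')
#     # input_ids begin with the en_XX language-code token and end with </s>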
| 155 |
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    # Map each value to the list of positions where it occurs
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
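# The functions above assume a singly linked node with `val` and `next`
# attributes; minimal sketch (node class is illustrative, not from the module):
#
#     class ListNode:
#         def __init__(self, val):
#             self.val = val
#             self.next = None
#
#     head = ListNode(1)
#     head.next = ListNode(2)
#     head.next.next = ListNode(1)
#     assert is_palindrome_dict(head)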
| 29 | 0 |
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path='shi-labs/oneformer_demo'):
    with open(hf_hub_download(repo_path, class_info_file, repo_type='dataset'), 'r') as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info['name']
        class_names.append(info['name'])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata['thing_ids'] = thing_ids
    metadata['class_names'] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, size=None, do_resize=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], num_labels=10, do_reduce_labels=False, ignore_index=255, repo_path="shi-labs/oneformer_demo", class_info_file="ade20k_panoptic.json", num_text=10, ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {'shortest_edge': 32, 'longest_edge': 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path
        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
    def get_fake_oneformer_outputs(self):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) ,masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) ,)
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    image_processing_class = image_processing_class
    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, 'image_mean'))
        self.assertTrue(hasattr(image_processor, 'image_std'))
        self.assertTrue(hasattr(image_processor, 'do_normalize'))
        self.assertTrue(hasattr(image_processor, 'do_resize'))
        self.assertTrue(hasattr(image_processor, 'size'))
        self.assertTrue(hasattr(image_processor, 'ignore_index'))
        self.assertTrue(hasattr(image_processor, 'class_info_file'))
        self.assertTrue(hasattr(image_processor, 'num_text'))
        self.assertTrue(hasattr(image_processor, 'repo_path'))
        self.assertTrue(hasattr(image_processor, 'metadata'))
        self.assertTrue(hasattr(image_processor, 'do_reduce_labels'))
    def test_batch_feature(self):
pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ['semantic'], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ['semantic'] * len(image_inputs), return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ['semantic'], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ['semantic'] * len(image_inputs), return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ['semantic'], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ['semantic'] * len(image_inputs), return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def comm_get_image_processor_inputs(self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]
        inputs = image_processor(
            image_inputs, ['semantic'] * len(image_inputs), annotations, return_tensors='pt', instance_id_to_semantic_id=instance_id_to_semantic_id, pad_and_return_pixel_mask=True, )
        return inputs
    def test_init_without_params(self):
pass
    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type)
            mask_labels = inputs['mask_labels']
            class_labels = inputs['class_labels']
            pixel_values = inputs['pixel_values']
            text_inputs = inputs['text_inputs']
            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type='pil')
        common(is_instance_map=True, segmentation_type='pil')
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1
        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
    def test_post_process_semantic_segmentation(self):
        fature_extractor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file='ade20k_panoptic.json', num_text=self.image_processing_tester.num_text, repo_path='shi-labs/oneformer_demo', )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = fature_extractor.post_process_semantic_segmentation(outputs)
        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape, (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ), )
        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = fature_extractor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
        self.assertEqual(segmentation[0].shape, target_sizes[0])
    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file='ade20k_panoptic.json', num_text=self.image_processing_tester.num_text, repo_path='shi-labs/oneformer_demo', )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)
        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue('segmentation' in el)
            self.assertTrue('segments_info' in el)
            self.assertEqual(type(el['segments_info']), list)
            self.assertEqual(
                el['segmentation'].shape, (self.image_processing_tester.height, self.image_processing_tester.width))
    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file='ade20k_panoptic.json', num_text=self.image_processing_tester.num_text, repo_path='shi-labs/oneformer_demo', )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)
        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue('segmentation' in el)
            self.assertTrue('segments_info' in el)
            self.assertEqual(type(el['segments_info']), list)
            self.assertEqual(
                el['segmentation'].shape, (self.image_processing_tester.height, self.image_processing_tester.width)) | 74 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
        'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ViTMSNModel',
        'ViTMSNForImageClassification',
        'ViTMSNPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
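# With the lazy module installed in sys.modules, the heavyweight torch code is
# only imported on first attribute access; rough usage sketch:
#
#     from transformers.models.vit_msn import ViTMSNConfig  # resolved lazily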
| 29 | 0 |
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """Build and simulate a quantum half adder; returns the measurement counts."""
    simulator = qiskit.Aer.get_backend('aer_simulator')
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
    counts = half_adder(1, 1)
print(F'''Half Adder Output Qubit Counts: {counts}''')
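# For inputs (1, 1) the XOR lands on classical bit 0 and the AND on bit 1, so
# an ideal (noise-free) simulation reports all 1000 shots as '10':
#
#     Half Adder Output Qubit Counts: {'10': 1000}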
| 142 |
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
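# Sketch of how a pin table like this is typically consumed in a setup.py
# (helper shown for illustration; treat the exact name as an assumption):
#
#     def deps_list(*pkgs):
#         return [deps[pkg] for pkg in pkgs]
#
#     install_requires = deps_list('numpy', 'packaging', 'pyyaml', 'requests', 'tqdm')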
| 29 | 0 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = 'data2vec-audio'

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embedding_groups=16, conv_pos_kernel_size=19, num_conv_pos_embeddings=5, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="sum", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, **kwargs, ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
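# Instantiation sketch (defaults mirror the base data2vec-audio checkpoints):
#
#     config = Data2VecAudioConfig()
#     config.inputs_to_logits_ratio  # 5*2*2*2*2*2*2 == 320 samples per logit frame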
| 5 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types=("DownEncoderBlock2D",), up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 4, norm_num_groups: int = 32, sample_size: int = 32, scaling_factor: float = 0.18215, ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=True, )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, norm_num_groups=norm_num_groups, act_fn=act_fn, )
        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        # route inputs larger than the tile size through tiled_encode/tiled_decode
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        # decode the batch one sample at a time to save memory
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes.")

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            # encode each image in the batch separately to save memory
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            # decode each latent in the batch separately to save memory
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)

    def blend_v(self, a, b, blend_extent):
        # blend the bottom rows of `a` into the top rows of `b`
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        # blend the right columns of `a` into the left columns of `b`
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, sample_posterior: bool = False, return_dict: bool = True, generator: Optional[torch.Generator] = None) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
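A hedged usage sketch, not part of the original file; the checkpoint name is illustrative. Enabling tiling routes inputs larger than `tile_sample_min_size` through `tiled_encode`/`tiled_decode`, trading a small amount of blending work for bounded memory.

import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")  # assumed checkpoint
vae.enable_tiling()
with torch.no_grad():
    image = torch.randn(1, 3, 1024, 1024)
    latents = vae.encode(image).latent_dist.sample()
    recon = vae.decode(latents).sample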
| 29 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"])

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps)
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.")
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=1, help="The number of minibatches to be ran before gradients are accumulated.")
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD")
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
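For reference, a typical launch command for the distributed settings listed in the banner (a hedged note; the script filename and flag values are assumptions for illustration):

# accelerate launch local_sgd.py --local_sgd_steps 8 --gradient_accumulation_steps 2 --mixed_precision fp16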
| 252 |
def binary_and(a: int, b: int) -> str:
    """
    Take in two integers, convert them to binary, and return a binary string that
    is the result of a bitwise AND on the inputs.

    >>> binary_and(25, 32)
    '0b000000'
    >>> binary_and(37, 50)
    '0b100000'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
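For contrast (an added note, not in the original file), Python's built-in operators compute the same value directly, modulo the zero padding in the string form:

a, b = 37, 50
assert int(binary_and(a, b), 2) == (a & b)  # '0b100000' -> 32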
| 29 | 0 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, act_dim=6, state_dim=17, hidden_size=23, max_length=11, is_training=True):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size, seq_length=self.seq_length, act_dim=self.act_dim, state_dim=self.state_dim, hidden_size=self.hidden_size, max_length=self.max_length)

    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size))  # seq length *3 as there are 3 modalities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device)

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_preds, action_pred, return_preds = model(
                    states=states, actions=actions, rewards=rewards, returns_to_go=returns_to_go, timesteps=timesteps, attention_mask=attention_mask, return_dict=False)

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            action = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1)
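A small numeric sketch, not part of the original test, of the returns-to-go bookkeeping performed at the end of the loop above: each step subtracts the observed reward from the remaining target return.

target_return = 10.0
rtg = [target_return]
for reward in [1.0, 1.0]:
    rtg.append(rtg[-1] - reward)
assert rtg == [10.0, 9.0, 8.0]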
| 276 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
        'YituTech/conv-bert-medium-small': (
            'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
        ),
        'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'YituTech/conv-bert-base': 512,
    'YituTech/conv-bert-medium-small': 512,
    'YituTech/conv-bert-small': 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    'YituTech/conv-bert-base': {'do_lower_case': True},
    'YituTech/conv-bert-medium-small': {'do_lower_case': True},
    'YituTech/conv-bert-small': {'do_lower_case': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    '''Construct a "fast" ConvBERT tokenizer, backed by HuggingFace's tokenizers library.'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs) -> None:
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
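An illustrative sketch, not from the original file, of the special-token layout the two helpers above produce for a sentence pair:

# [CLS] A A [SEP] B B [SEP]  ->  token type ids 0 0 0 0 1 1 1
token_ids_0, token_ids_1 = [7, 8], [9, 10]
segment_ids = (1 + len(token_ids_0) + 1) * [0] + (len(token_ids_1) + 1) * [1]
assert segment_ids == [0, 0, 0, 0, 1, 1, 1]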
| 29 | 0 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(cls, model_id, use_auth_token=None, revision=None, force_download=False, cache_dir=None, file_name=None, provider=None, sess_options=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options)
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token, revision=revision, cache_dir=cache_dir, force_download=force_download)
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(cls, model_id, force_download=True, use_auth_token=None, cache_dir=None, **model_kwargs):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download, use_auth_token=use_auth_token, **model_kwargs)
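A small check plus a hedged loading sketch; the model path is a placeholder, not from the original file:

# the type map above is used to cast ONNX Runtime outputs back to numpy dtypes
assert ORT_TO_NP_TYPE["tensor(float)"] is np.float32

# model = OnnxRuntimeModel.from_pretrained("path/to/onnx_model_dir")  # expects model.onnx inside
# outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))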
| 104 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'snap-research/efficientformer-l1-300': (
        'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
    ),
}


class EfficientFormerConfig(PretrainedConfig):
    model_type = 'efficientformer'

    def __init__(self, depths: List[int] = [3, 2, 6, 4], hidden_sizes: List[int] = [48, 96, 224, 448], downsamples: List[bool] = [True, True, True, True], dim: int = 448, key_dim: int = 32, attention_ratio: int = 4, resolution: int = 7, num_hidden_layers: int = 5, num_attention_heads: int = 8, mlp_expansion_ratio: int = 4, hidden_dropout_prob: float = 0.0, patch_size: int = 16, num_channels: int = 3, pool_size: int = 3, downsample_patch_size: int = 3, downsample_stride: int = 2, downsample_pad: int = 1, drop_path_rate: float = 0.0, num_meta3d_blocks: int = 1, distillation: bool = True, use_layer_scale: bool = True, layer_scale_init_value: float = 1e-5, hidden_act: str = 'gelu', initializer_range: float = 0.02, layer_norm_eps: float = 1e-12, image_size: int = 224, batch_norm_eps: float = 1e-05, **kwargs) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
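A minimal instantiation sketch, not from the original file; the per-stage lists must stay aligned:

config = EfficientFormerConfig(depths=[3, 2, 6, 4], hidden_sizes=[48, 96, 224, 448])
assert len(config.depths) == len(config.hidden_sizes) == 4  # one entry per stage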
| 29 | 0 |
import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """
    Strand sort: repeatedly pull an ascending strand (sublist) out of the input
    and merge it into the solution list.
    """
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
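An illustrative first pass, not in the original file: strand extraction pulls an ascending run out of the input, which the later recursive calls merge into the solution.

arr = [4, 3, 5, 1, 2]
sublist = [arr.pop(0)]  # [4]
for i, item in enumerate(arr):
    if item > sublist[-1]:
        sublist.append(item)  # picks up 5
        arr.pop(i)
assert sublist == [4, 5] and arr == [3, 1, 2]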
| 85 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n        ```py\n        >>> from PIL import Image\n        >>> import torch\n        >>> from diffusers import DiffusionPipeline\n        >>> from diffusers.utils import export_to_gif, load_image\n\n        >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n        >>> repo = "openai/shap-e-img2img"\n        >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n        >>> pipe = pipe.to(device)\n\n        >>> guidance_scale = 3.0\n        >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n        >>> image = load_image(image_url).convert("RGB")\n\n        >>> images = pipe(\n        ...     image,\n        ...     guidance_scale=guidance_scale,\n        ...     num_inference_steps=64,\n        ...     frame_size=256,\n        ... ).images\n\n        >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n        ```\n'


@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output class for the Shap-E pipelines, holding the rendered images."""

    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    """Pipeline for generating latent 3D representations from an input image, using Shap-E."""

    def __init__(self, prior: PriorTransformer, image_encoder: CLIPVisionModel, image_processor: CLIPImageProcessor, scheduler: HeunDiscreteScheduler, renderer: ShapERenderer):
        super().__init__()
        self.register_modules(
            prior=prior, image_encoder=image_encoder, image_processor=image_processor, scheduler=scheduler, renderer=renderer)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        if self.device != torch.device('meta') or not hasattr(self.image_encoder, '_hf_hook'):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors='pt').pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)['last_hidden_state']
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image, num_images_per_prompt: int = 1, num_inference_steps: int = 25, generator=None, latents=None, guidance_scale: float = 4.0, frame_size: int = 64, output_type: str = 'pil', return_dict: bool = True):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim), image_embeds.dtype, device, generator, latents, self.scheduler)

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input, timestep=t, proj_embedding=image_embeds).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2)  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred, timestep=t, sample=latents).prev_sample

        if output_type == 'latent':
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :], device, size=frame_size, ray_batch_size=4096, n_coarse_samples=64, n_fine_samples=128)
            images.append(image)

        images = torch.stack(images)

        if output_type not in ['np', 'pil']:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == 'pil':
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, 'final_offload_hook') and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
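The guidance line in the loop above is standard classifier-free guidance; a tiny numeric sketch, not from the original file, of the extrapolation it performs:

import torch

guidance_scale = 4.0
uncond = torch.tensor([0.0])
cond = torch.tensor([1.0])
guided = uncond + guidance_scale * (cond - uncond)
assert guided.item() == 4.0  # moves past the conditional prediction when scale > 1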
| 29 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed")
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2)
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed")
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2)
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation with the score-based SDE-VE model (predictor-corrector sampling)."""

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, num_inference_steps: int = 2000, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_time_series_transformer': [
        'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'TimeSeriesTransformerConfig',
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_time_series_transformer'] = [
        'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TimeSeriesTransformerForPrediction',
        'TimeSeriesTransformerModel',
        'TimeSeriesTransformerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
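A brief usage sketch, an assumption for illustration and not part of this __init__.py: with the lazy structure above, importing the package is cheap and the torch-backed submodule is loaded only on first attribute access.

from transformers.models import time_series_transformer as tst

config_cls = tst.TimeSeriesTransformerConfig  # first access triggers the real import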
| 29 | 0 |
def greatest_common_divisor(a: int, b: int) -> int:
    """Calculate the greatest common divisor (GCD) recursively, by Euclid's algorithm."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Calculate the GCD iteratively: gcd(x, y) = gcd(y, x % y) while y != 0."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    try:
        nums = input('Enter two integers separated by comma (,): ').split(',')
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"""greatest_common_divisor({num_1}, {num_2}) = """
            f"""{greatest_common_divisor(num_1, num_2)}""")
        print(f"""By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}""")
    except (IndexError, UnboundLocalError, ValueError):
        print('Wrong input')
if __name__ == "__main__":
main()
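A worked trace, added for illustration and not part of the original file, of Euclid's algorithm on 48 and 18:

# 48 = 2*18 + 12 -> 18 = 1*12 + 6 -> 12 = 2*6 + 0, so gcd(48, 18) = 6
assert greatest_common_divisor(48, 18) == 6
assert gcd_by_iterative(48, 18) == 6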
| 90 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    'tensor(bool)': np.bool_,
    'tensor(int8)': np.int8,
    'tensor(uint8)': np.uint8,
    'tensor(int16)': np.int16,
    'tensor(uint16)': np.uint16,
    'tensor(int32)': np.int32,
    'tensor(uint32)': np.uint32,
    'tensor(int64)': np.int64,
    'tensor(uint64)': np.uint64,
    'tensor(float16)': np.float16,
    'tensor(float)': np.float32,
    'tensor(double)': np.float64,
}
class lowerCamelCase :
'''simple docstring'''
def __init__( self , _UpperCamelCase=None , **_UpperCamelCase ) -> Dict:
logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
UpperCAmelCase_ : Any = model
UpperCAmelCase_ : int = kwargs.get('model_save_dir' , _UpperCamelCase )
UpperCAmelCase_ : List[Any] = kwargs.get('latest_model_name' , _UpperCamelCase )
def __call__( self , **_UpperCamelCase ) -> str:
UpperCAmelCase_ : Optional[int] = {k: np.array(_UpperCamelCase ) for k, v in kwargs.items()}
return self.model.run(_UpperCamelCase , _UpperCamelCase )
@staticmethod
def __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None ) -> List[Any]:
if provider is None:
logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
UpperCAmelCase_ : List[str] = 'CPUExecutionProvider'
return ort.InferenceSession(_UpperCamelCase , providers=[provider] , sess_options=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Any = file_name if file_name is not None else ONNX_WEIGHTS_NAME
UpperCAmelCase_ : Optional[Any] = self.model_save_dir.joinpath(self.latest_model_name )
UpperCAmelCase_ : str = Path(_UpperCamelCase ).joinpath(_UpperCamelCase )
try:
shutil.copyfile(_UpperCamelCase , _UpperCamelCase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
UpperCAmelCase_ : Optional[Any] = self.model_save_dir.joinpath(_UpperCamelCase )
if src_path.exists():
UpperCAmelCase_ : List[Any] = Path(_UpperCamelCase ).joinpath(_UpperCamelCase )
try:
shutil.copyfile(_UpperCamelCase , _UpperCamelCase )
except shutil.SameFileError:
pass
def __UpperCAmelCase ( self , _UpperCamelCase , **_UpperCamelCase , ) -> List[str]:
if os.path.isfile(_UpperCamelCase ):
logger.error(f"Provided path ({save_directory}) should be a directory, not a file" )
return
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
# saving model weights/files
self._save_pretrained(_UpperCamelCase , **_UpperCamelCase )
@classmethod
def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> List[str]:
UpperCAmelCase_ : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(_UpperCamelCase ):
UpperCAmelCase_ : Union[str, Any] = OnnxRuntimeModel.load_model(
os.path.join(_UpperCamelCase , _UpperCamelCase ) , provider=_UpperCamelCase , sess_options=_UpperCamelCase )
UpperCAmelCase_ : Tuple = Path(_UpperCamelCase )
# load model from hub
else:
# download model
UpperCAmelCase_ : List[str] = hf_hub_download(
repo_id=_UpperCamelCase , filename=_UpperCamelCase , use_auth_token=_UpperCamelCase , revision=_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , )
UpperCAmelCase_ : Union[str, Any] = Path(_UpperCamelCase ).parent
UpperCAmelCase_ : List[str] = Path(_UpperCamelCase ).name
UpperCAmelCase_ : Union[str, Any] = OnnxRuntimeModel.load_model(_UpperCamelCase , provider=_UpperCamelCase , sess_options=_UpperCamelCase )
return cls(model=_UpperCamelCase , **_UpperCamelCase )
@classmethod
def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> Optional[int]:
UpperCAmelCase_ : List[str] = None
if len(str(_UpperCamelCase ).split('@' ) ) == 2:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = model_id.split('@' )
return cls._from_pretrained(
model_id=_UpperCamelCase , revision=_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , use_auth_token=_UpperCamelCase , **_UpperCamelCase , )
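# Usage sketch (added; upstream this class is diffusers.OnnxRuntimeModel, and the
# path and input name below are illustrative):
#
#   model = OnnxRuntimeModel.from_pretrained('./onnx_model_dir', file_name='model.onnx')
#   outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))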
| 29 | 0 |
"""simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
TARGET_FEATURE_LENGTH = 256
class SCREAMING_SNAKE_CASE__ ( _snake_case ):
_a = ['''melgan''']
def __init__( self : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Any , lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : List[Any] , ):
super().__init__()
# From MELGAN
        self.min_value = math.log(1e-5 )  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128
self.register_modules(
notes_encoder=_UpperCamelCase , continuous_encoder=_UpperCamelCase , decoder=_UpperCamelCase , scheduler=_UpperCamelCase , melgan=_UpperCamelCase , )
def __lowercase ( self : int , lowerCAmelCase : Dict , lowerCAmelCase : List[str]=(-1.0, 1.0) , lowerCAmelCase : Any=False ):
        min_out , max_out = output_range
if clip:
lowerCAmelCase = torch.clip(_UpperCamelCase , self.min_value , self.max_value )
# Scale to [0, 1].
lowerCAmelCase = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def __lowercase ( self : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Dict=(-1.0, 1.0) , lowerCAmelCase : Optional[Any]=False ):
        min_out , max_out = input_range
lowerCAmelCase = torch.clip(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) if clip else outputs
# Scale to [0, 1].
lowerCAmelCase = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def __lowercase ( self : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple ):
        tokens_mask = input_tokens > 0
        tokens_encoded , tokens_mask = self.notes_encoder(
            encoder_input_tokens=_UpperCamelCase , encoder_inputs_mask=_UpperCamelCase )
        continuous_encoded , continuous_mask = self.continuous_encoder(
            encoder_inputs=_UpperCamelCase , encoder_inputs_mask=_UpperCamelCase )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def __lowercase ( self : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : str , lowerCAmelCase : Any ):
lowerCAmelCase = noise_time
if not torch.is_tensor(_UpperCamelCase ):
lowerCAmelCase = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(_UpperCamelCase ) and len(timesteps.shape ) == 0:
lowerCAmelCase = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowerCAmelCase = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
lowerCAmelCase = self.decoder(
encodings_and_masks=_UpperCamelCase , decoder_input_tokens=_UpperCamelCase , decoder_noise_time=_UpperCamelCase )
return logits
@torch.no_grad()
def __call__( self : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] = None , lowerCAmelCase : Tuple = 100 , lowerCAmelCase : Any = True , lowerCAmelCase : List[str] = "numpy" , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : Optional[int] = 1 , ):
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_UpperCamelCase , _UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(_UpperCamelCase )}.''' )
        lowerCAmelCase = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.float32 )
        lowerCAmelCase = np.zeros([1, 0, self.n_dims] , np.float32 )
lowerCAmelCase = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_UpperCamelCase , device=self.device )
for i, encoder_input_tokens in enumerate(_UpperCamelCase ):
if i == 0:
lowerCAmelCase = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
lowerCAmelCase = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_UpperCamelCase , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
lowerCAmelCase = ones
lowerCAmelCase = self.scale_features(
_UpperCamelCase , output_range=[-1.0, 1.0] , clip=_UpperCamelCase )
lowerCAmelCase = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_UpperCamelCase , continuous_mask=_UpperCamelCase , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
lowerCAmelCase = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=_UpperCamelCase , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(_UpperCamelCase )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowerCAmelCase = self.decode(
encodings_and_masks=_UpperCamelCase , input_tokens=_UpperCamelCase , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
lowerCAmelCase = self.scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase ).prev_sample
lowerCAmelCase = self.scale_to_features(_UpperCamelCase , input_range=[-1.0, 1.0] )
lowerCAmelCase = mel[:1]
lowerCAmelCase = mel.cpu().float().numpy()
lowerCAmelCase = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_UpperCamelCase , _UpperCamelCase )
logger.info("""Generated segment""" , _UpperCamelCase )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"""Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.""" )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"""Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.""" )
if output_type == "numpy":
            lowerCAmelCase = self.melgan(input_features=full_pred_mel.astype(np.float32 ) )
else:
lowerCAmelCase = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=_UpperCamelCase )
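# Usage sketch (added; upstream this is SpectrogramDiffusionPipeline and the hub id
# below hosted it -- treat the exact API as an assumption):
#
#   from diffusers import SpectrogramDiffusionPipeline
#   pipe = SpectrogramDiffusionPipeline.from_pretrained('google/music-spectrogram-diffusion')
#   output = pipe(processed_midi_tokens, num_inference_steps=100)  # tokens from MidiProcessor
#   audio = output.audios[0]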
| 155 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = 10
UpperCAmelCase_ : Tuple = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
UpperCAmelCase_ : Tuple = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(__snake_case ) ),
} , features=__snake_case , )
return dataset
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : str = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=__snake_case )
return filename
# FILE_CONTENT + files
FILE_CONTENT = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt'
UpperCAmelCase_ : Tuple = FILE_CONTENT
with open(__snake_case , 'w' ) as f:
f.write(__snake_case )
return filename
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
    import bz2
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
UpperCAmelCase_ : str = bytes(__snake_case , 'utf-8' )
    with bz2.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
UpperCAmelCase_ : Dict = bytes(__snake_case , 'utf-8' )
with gzip.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' )
        with lz4.frame.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : List[Any] ):
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
        with py7zr.SevenZipFile(__snake_case , 'w' ) as archive:
archive.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] , __snake_case : List[Any] ):
'''simple docstring'''
import tarfile
UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
import lzma
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' )
with lzma.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[int] , __snake_case : Optional[Any] ):
'''simple docstring'''
import zipfile
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
UpperCAmelCase_ : Tuple = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
UpperCAmelCase_ : List[str] = bytes(__snake_case , 'utf-8' )
with zstd.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.xml'
UpperCAmelCase_ : List[Any] = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(__snake_case , 'w' ) as f:
f.write(__snake_case )
return filename
DATA = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
DATA2 = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
DATA_DICT_OF_LISTS = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = datasets.Dataset.from_dict(__snake_case )
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlite3.connect(__snake_case ) ) as con:
UpperCAmelCase_ : List[Any] = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(__snake_case , 'w' , newline='' ) as f:
UpperCAmelCase_ : Tuple = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(__snake_case , 'w' , newline='' ) as f:
UpperCAmelCase_ : Optional[Any] = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any ):
'''simple docstring'''
    import bz2
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(__snake_case , 'rb' ) as f:
UpperCAmelCase_ : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] , __snake_case : Tuple , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(__snake_case , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : int , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
UpperCAmelCase_ : Dict = pa.schema(
{
'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
} )
with open(__snake_case , 'wb' ) as f:
UpperCAmelCase_ : List[Any] = pq.ParquetWriter(__snake_case , schema=__snake_case )
UpperCAmelCase_ : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__snake_case ) )] for k in DATA[0]} , schema=__snake_case )
writer.write_table(__snake_case )
writer.close()
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase_ : Optional[int] = {'data': DATA}
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase_ : Tuple = {'data': DATA_DICT_OF_LISTS}
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Dict ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(__snake_case , 'rb' ) as orig_file:
with gzip.open(__snake_case , 'wb' ) as zipped_file:
zipped_file.writelines(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int , __snake_case : Any ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(__snake_case , 'rb' ) as orig_file:
with gzip.open(__snake_case , 'wb' ) as zipped_file:
zipped_file.writelines(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : str , __snake_case : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : str , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any , __snake_case : Any , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = ['0', '1', '2', '3']
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = ['0', '1', '2', '3']
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ['0', '1', '2', '3']
UpperCAmelCase_ : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : str , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename('unsupported.ext' ) )
f.write(__snake_case , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(__snake_case , 'w' , encoding='utf-8' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
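# Usage sketch (added; the fixture name is hypothetical -- upstream the fixtures
# carry descriptive names such as text_file / csv_path instead of placeholders):
#
#   def test_reads_text_fixture(text_file):
#       with open(text_file) as f:
#           assert f.read() == FILE_CONTENT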
| 29 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
_lowercase = logging.get_logger(__name__)
class lowerCAmelCase_ ( _snake_case ):
'''simple docstring'''
def __init__( self : Optional[int] ,*A_ : str ,**A_ : Union[str, Any] ) -> None:
        warnings.warn(
            'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ImageGPTImageProcessor instead.' , FutureWarning ,)
        super().__init__(*_UpperCamelCase ,**_UpperCamelCase )
| 74 |
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """
    Find all the valid positions a knight can move to from the current position.
    """
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """
    Check if the board (matrix) has been completely filled with non-zero values.
    """
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """
    Helper function to solve the knight tour problem by backtracking.
    """
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """
    Find a solution for the knight tour problem on a board of size n,
    raising ValueError when no open tour exists for that size.
    """
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
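    # Extra illustrative check (added; plain backtracking, so it may take a moment):
    # a 5x5 open tour exists and numbers every square exactly once with 1..25.
    board = open_knight_tour(5)
    assert sorted(cell for row in board for cell in row) == list(range(1, 26))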
| 29 | 0 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    """Convert an original XLM checkpoint into the transformers layout."""
    chkpt = torch.load(xlm_checkpoint_path, map_location='cpu')
    state_dict = chkpt['model']
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict['transformer.' + k] = v
    config = chkpt['params']
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}
    vocab = chkpt['dico_word2id']
    vocab = {s + '</w>' if s.find('@@') == -1 and i > 13 else s.replace('@@', ''): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(config, indent=2) + '\n')
    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(vocab, indent=2) + '\n')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
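# Example invocation (added; file names are illustrative):
#
#   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#       --xlm_checkpoint_path ./mlm_en_2048.pth \
#       --pytorch_dump_folder_path ./xlm-converted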
| 142 |
def partition(m: int) -> int:
    """Count the number of integer partitions of m with bottom-up dynamic programming."""
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input('Enter a number: ').strip())
            print(partition(n))
        except ValueError:
            print('Please enter a number.')
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print('Please pass a number.')
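    # Illustrative values (added): the partition numbers p(1)..p(5) are 1, 2, 3, 5, 7.
    assert [partition(i) for i in range(1, 6)] == [1, 2, 3, 5, 7]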
| 29 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class lowerCamelCase__ ( _snake_case):
SCREAMING_SNAKE_CASE__ = '''ctrl'''
SCREAMING_SNAKE_CASE__ = ['''past_key_values''']
SCREAMING_SNAKE_CASE__ = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
    def __init__(self, vocab_size=2_4_6_5_3_4, n_positions=2_5_6, n_embd=1_2_8_0, dff=8_1_9_2, n_layer=4_8, n_head=1_6, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02, use_cache=True, **kwargs) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs )
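# Usage sketch (added; upstream this class is CTRLConfig -- the defaults above
# reproduce the released Salesforce CTRL model, and smaller variants just
# override kwargs; values here are illustrative):
#
#   config = CTRLConfig(n_layer=2, n_head=4, n_embd=128)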
| 5 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(_snake_case )
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
self.check_model_type(_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase ) -> List[Any]:
        preprocess_params , postprocess_params = {}, {}
        if padding is not None:
            preprocess_params['padding'] = padding
        if truncation is not None:
            preprocess_params['truncation'] = truncation
        if top_k is not None:
            postprocess_params['top_k'] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> int:
if isinstance(_UpperCamelCase , (Image.Image, str) ) and isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Optional[Any] = {'image': image, 'question': question}
else:
UpperCAmelCase_ : List[str] = image
UpperCAmelCase_ : Optional[Any] = super().__call__(_UpperCamelCase , **_UpperCamelCase )
return results
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=False ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = load_image(inputs['image'] )
UpperCAmelCase_ : Dict = self.tokenizer(
inputs['question'] , return_tensors=self.framework , padding=_UpperCamelCase , truncation=_UpperCamelCase )
UpperCAmelCase_ : int = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework )
model_inputs.update(_UpperCamelCase )
return model_inputs
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[int]:
UpperCAmelCase_ : Any = self.model(**_UpperCamelCase )
return model_outputs
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=5 ) -> str:
if top_k > self.model.config.num_labels:
UpperCAmelCase_ : Union[str, Any] = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase_ : List[str] = model_outputs.logits.sigmoid()[0]
UpperCAmelCase_ , UpperCAmelCase_ : str = probs.topk(_UpperCamelCase )
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
UpperCAmelCase_ : Optional[Any] = scores.tolist()
UpperCAmelCase_ : Tuple = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase )]
| 29 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
UpperCAmelCase : Dict = TypeVar("T")
class __lowercase ( Generic[T] ):
"""simple docstring"""
def __init__( self , A , A ) -> None:
'''simple docstring'''
lowerCamelCase = None
lowerCamelCase = len(_UpperCamelCase )
lowerCamelCase = [any_type for _ in range(self.N )] + arr
lowerCamelCase = fnc
self.build()
def __A ( self ) -> None:
'''simple docstring'''
for p in range(self.N - 1 , 0 , -1 ):
lowerCamelCase = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def __A ( self , A , A ) -> None:
'''simple docstring'''
p += self.N
lowerCamelCase = v
while p > 1:
lowerCamelCase = p // 2
lowerCamelCase = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def __A ( self , A , A ) -> T | None: # noqa: E741
'''simple docstring'''
lowerCamelCase = l + self.N, r + self.N
lowerCamelCase = None
while l <= r:
if l % 2 == 1:
lowerCamelCase = self.st[l] if res is None else self.fn(_UpperCamelCase , self.st[l] )
if r % 2 == 0:
lowerCamelCase = self.st[r] if res is None else self.fn(_UpperCamelCase , self.st[r] )
lowerCamelCase = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """
        Test all possible segments against functools.reduce as a reference.
        """
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
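    # Added illustration: query ranges are inclusive on both ends, so this is the
    # minimum over the whole (updated) array.
    print(min_segment_tree.query(0, len(test_array) - 1))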
| 252 |
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution():
    """
    Find the number of triangle words in words.txt: a word counts when the sum of
    its alphabetical character values (A=1, B=2, ...) is a triangular number.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, 'words.txt')

    words = ''
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip('\r\n').split(',')]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
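    # Worked example from the problem statement (added): "SKY" -> 19 + 11 + 25 = 55,
    # the tenth triangular number, so SKY counts as a triangle word.
    assert sum(ord(x) - 64 for x in 'SKY') == 55 and 55 in TRIANGULAR_NUMBERS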
| 29 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__: Optional[Any] = logging.get_logger(__name__)
A__: Tuple = {
'''huggingface/time-series-transformer-tourism-monthly''': (
'''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class A__ ( _snake_case ):
__UpperCamelCase : str = '''time_series_transformer'''
__UpperCamelCase : Optional[int] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self :Union[str, Any] , SCREAMING_SNAKE_CASE :str = None , SCREAMING_SNAKE_CASE :Optional[Any] = None , SCREAMING_SNAKE_CASE :Union[str, Any] = "student_t" , SCREAMING_SNAKE_CASE :int = "nll" , SCREAMING_SNAKE_CASE :List[str] = 1 , SCREAMING_SNAKE_CASE :Optional[int] = [1, 2, 3, 4, 5, 6, 7] , SCREAMING_SNAKE_CASE :Optional[int] = "mean" , SCREAMING_SNAKE_CASE :Optional[int] = 0 , SCREAMING_SNAKE_CASE :Tuple = 0 , SCREAMING_SNAKE_CASE :Union[str, Any] = 0 , SCREAMING_SNAKE_CASE :Any = 0 , SCREAMING_SNAKE_CASE :List[Any] = None , SCREAMING_SNAKE_CASE :int = None , SCREAMING_SNAKE_CASE :Dict = 3_2 , SCREAMING_SNAKE_CASE :Dict = 3_2 , SCREAMING_SNAKE_CASE :Union[str, Any] = 2 , SCREAMING_SNAKE_CASE :List[str] = 2 , SCREAMING_SNAKE_CASE :int = 2 , SCREAMING_SNAKE_CASE :List[str] = 2 , SCREAMING_SNAKE_CASE :Optional[int] = True , SCREAMING_SNAKE_CASE :Tuple = "gelu" , SCREAMING_SNAKE_CASE :List[Any] = 6_4 , SCREAMING_SNAKE_CASE :str = 0.1 , SCREAMING_SNAKE_CASE :Dict = 0.1 , SCREAMING_SNAKE_CASE :Dict = 0.1 , SCREAMING_SNAKE_CASE :Union[str, Any] = 0.1 , SCREAMING_SNAKE_CASE :Optional[int] = 0.1 , SCREAMING_SNAKE_CASE :Union[str, Any] = 1_0_0 , SCREAMING_SNAKE_CASE :int = 0.02 , SCREAMING_SNAKE_CASE :Optional[Any]=True , **SCREAMING_SNAKE_CASE :Any , ) -> Optional[int]:
'''simple docstring'''
# time series specific configuration
_a : Any =prediction_length
_a : int =context_length or prediction_length
_a : Tuple =distribution_output
_a : Optional[int] =loss
_a : Optional[Any] =input_size
_a : List[Any] =num_time_features
_a : str =lags_sequence
_a : Union[str, Any] =scaling
_a : List[Any] =num_dynamic_real_features
_a : Tuple =num_static_real_features
_a : Tuple =num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(_UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
_a : str =cardinality
else:
_a : List[str] =[0]
if embedding_dimension and num_static_categorical_features > 0:
if len(_UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
_a : Any =embedding_dimension
else:
_a : str =[min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
_a : int =num_parallel_samples
# Transformer architecture configuration
_a : str =input_size * len(_UpperCamelCase ) + self._number_of_features
_a : Optional[Any] =d_model
_a : List[Any] =encoder_attention_heads
_a : List[Any] =decoder_attention_heads
_a : int =encoder_ffn_dim
_a : Any =decoder_ffn_dim
_a : Dict =encoder_layers
_a : Tuple =decoder_layers
_a : Union[str, Any] =dropout
_a : Optional[Any] =attention_dropout
_a : Optional[Any] =activation_dropout
_a : List[str] =encoder_layerdrop
_a : List[str] =decoder_layerdrop
_a : int =activation_function
_a : List[Any] =init_std
_a : Optional[Any] =use_cache
super().__init__(is_encoder_decoder=_UpperCamelCase , **_UpperCamelCase )
@property
    def _number_of_features( self :Tuple ) -> int:
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
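# Usage sketch (added; upstream class name TimeSeriesTransformerConfig, values
# illustrative):
#
#   config = TimeSeriesTransformerConfig(prediction_length=24)
#   assert config.context_length == 24   # defaults to prediction_length when unset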
| 276 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec('s3fs') is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """
    Preprocess `dataset_path` so that it can be used by fsspec, stripping the protocol prefix.
    """
    if "://" in dataset_path:
        dataset_path = dataset_path.split('://')[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """
    Check whether `fs` is a remote (non-local) filesystem.
    """
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    """
    Rename a file or move a directory on the given filesystem.
    """
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
    """
    Clear fsspec's internal event-loop references so that forked processes can
    create their own loop instead of hanging on the parent's lock.
    """
    if hasattr(fsspec.asyn, 'reset_lock'):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
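# Illustrative behaviour of the helpers above (added):
#
#   extract_path_from_uri('s3://my-bucket/dataset')  # -> 'my-bucket/dataset'
#   extract_path_from_uri('/local/path/dataset')     # -> '/local/path/dataset'
#   is_remote_filesystem(None)                       # -> False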
| 29 | 0 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class lowercase_ (_snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : Union[str, Any] = '''left'''
def __init__( self : Optional[int] ,lowercase__ : List[Any] ,lowercase__ : List[str]=False ,lowercase__ : Optional[int]=True ,lowercase__ : Optional[Any]=False ,lowercase__ : List[str]="<s>" ,lowercase__ : List[str]="</s>" ,lowercase__ : Union[str, Any]="<unk>" ,lowercase__ : Tuple="<sep>" ,lowercase__ : Union[str, Any]="<pad>" ,lowercase__ : Any="<cls>" ,lowercase__ : Dict="<mask>" ,lowercase__ : List[str]=["<eop>", "<eod>"] ,lowercase__ : Optional[Any] = None ,**lowercase__ : Tuple ,):
# Mask token behave like a normal word, i.e. include the space before it
__lowercase = AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else mask_token
__lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_UpperCamelCase ,remove_space=_UpperCamelCase ,keep_accents=_UpperCamelCase ,bos_token=_UpperCamelCase ,eos_token=_UpperCamelCase ,unk_token=_UpperCamelCase ,sep_token=_UpperCamelCase ,pad_token=_UpperCamelCase ,cls_token=_UpperCamelCase ,mask_token=_UpperCamelCase ,additional_special_tokens=_UpperCamelCase ,sp_model_kwargs=self.sp_model_kwargs ,**_UpperCamelCase ,)
__lowercase = 3
__lowercase = do_lower_case
__lowercase = remove_space
__lowercase = keep_accents
__lowercase = vocab_file
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCamelCase )
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ):
return len(self.sp_model )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[str] ):
__lowercase = self.__dict__.copy()
__lowercase = None
return state
def __setstate__( self : Optional[Any] ,lowercase__ : Dict ):
__lowercase = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
__lowercase = {}
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : str ):
if self.remove_space:
__lowercase = ' '.join(inputs.strip().split() )
else:
__lowercase = inputs
__lowercase = outputs.replace('''``''' ,'''"''' ).replace('''\'\'''' ,'''"''' )
if not self.keep_accents:
__lowercase = unicodedata.normalize('''NFKD''' ,_UpperCamelCase )
__lowercase = ''.join([c for c in outputs if not unicodedata.combining(_UpperCamelCase )] )
if self.do_lower_case:
__lowercase = outputs.lower()
return outputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Optional[int] ):
__lowercase = self.preprocess_text(_UpperCamelCase )
__lowercase = self.sp_model.encode(_UpperCamelCase ,out_type=_UpperCamelCase )
__lowercase = []
for piece in pieces:
if len(_UpperCamelCase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
__lowercase = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCamelCase ,'''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__lowercase = cur_pieces[1:]
else:
__lowercase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_UpperCamelCase )
else:
new_pieces.append(_UpperCamelCase )
return new_pieces
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : str ):
return self.sp_model.PieceToId(_UpperCamelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[str] ):
return self.sp_model.IdToPiece(_UpperCamelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : int ):
__lowercase = ''.join(_UpperCamelCase ).replace(_UpperCamelCase ,''' ''' ).strip()
return out_string
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : Any = False ,lowercase__ : int = None ,lowercase__ : Any = True ,**lowercase__ : List[Any] ,):
__lowercase = kwargs.pop('''use_source_tokenizer''' ,_UpperCamelCase )
__lowercase = self.convert_ids_to_tokens(_UpperCamelCase ,skip_special_tokens=_UpperCamelCase )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
__lowercase = []
__lowercase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) )
__lowercase = []
sub_texts.append(_UpperCamelCase )
else:
current_sub_text.append(_UpperCamelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
__lowercase = ''.join(_UpperCamelCase )
__lowercase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
__lowercase = self.clean_up_tokenization(_UpperCamelCase )
return clean_text
else:
return text
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Tuple ,lowercase__ : Dict = None ):
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Any ,lowercase__ : Any = None ,lowercase__ : Tuple = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase ,token_ids_a=_UpperCamelCase ,already_has_special_tokens=_UpperCamelCase )
if token_ids_a is not None:
return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1, 1]
return ([0] * len(_UpperCamelCase )) + [1, 1]
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Tuple ,lowercase__ : Optional[int] = None ):
__lowercase = [self.sep_token_id]
__lowercase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : List[Any] ,lowercase__ : str = None ):
if not os.path.isdir(_UpperCamelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__lowercase = os.path.join(
_UpperCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase ,'''wb''' ) as fi:
__lowercase = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,)
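# Usage sketch (added; upstream class name XLNetTokenizer, checkpoint id real):
#
#   from transformers import XLNetTokenizer
#   tok = XLNetTokenizer.from_pretrained('xlnet-base-cased')
#   tok.tokenize('Hello, world!')   # SentencePiece pieces, e.g. ['▁Hello', ',', ...]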
| 104 |
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Pure implementation of the cocktail shaker sort algorithm: bubble passes in
    both directions until no swap occurs.
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(F'{cocktail_shaker_sort(unsorted) = }')
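    # Illustrative check (added): the sort handles duplicates and reversed runs.
    assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]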
| 29 | 0 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
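

# Illustrative usage sketch (assumes the `datasets` package is installed and a
# local directory laid out as my_audio/<label>/<file>.wav; the path and labels
# are made up for the example).
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("audiofolder", data_dir="my_audio")
    print(ds["train"][0]["audio"])  # decoded audio: {"path", "array", "sampling_rate"}
    print(ds["train"].features)     # includes the label feature inferred from folder names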
| 85 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n    Examples:\n        ```py\n        >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n        >>> from diffusers.utils import load_image\n        >>> import torch\n\n        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n        ... )\n        >>> pipe_prior.to("cuda")\n\n        >>> prompt = "A red cartoon frog, 4k"\n        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n        >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n        ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n        ... )\n        >>> pipe.to("cuda")\n\n        >>> init_image = load_image(\n        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n        ...     "/kandinsky/frog.png"\n        ... )\n\n        >>> image = pipe(\n        ...     image=init_image,\n        ...     image_embeds=image_emb,\n        ...     negative_image_embeds=zero_image_emb,\n        ...     height=768,\n        ...     width=768,\n        ...     num_inference_steps=100,\n        ...     strength=0.2,\n        ... ).images\n\n        >>> image[0].save("red_frog.png")\n        ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    """Round pixel dims up to a multiple of scale_factor**2 and return the MoVQ latent dims."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
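

# Quick sanity checks (illustrative): the helper returns MoVQ *latent* sizes.
# Pixel dims are first rounded up to a multiple of scale_factor**2 (= 64 here)
# and then divided by scale_factor.
assert downscale_height_and_width(768, 768, scale_factor=8) == (96, 96)
assert downscale_height_and_width(770, 768, scale_factor=8) == (104, 96)  # 770 px -> 832 px -> 104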
def prepare_image(pil_image, w=512, h=512):
    """Resize a PIL image and convert it to a [-1, 1]-normalized NCHW float tensor."""
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
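

# Illustrative check (not part of the original pipeline file): a synthetic
# all-black image maps to a [-1, 1] tensor of shape (1, 3, 512, 512). The demo
# function name is ours; `prepare_image` is defined above.
def _prepare_image_demo():
    img = Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))
    tensor = prepare_image(img, w=512, h=512)
    assert tuple(tensor.shape) == (1, 3, 512, 512)
    assert float(tensor.min()) == -1.0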
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """Image-to-image generation pipeline for the Kandinsky 2.2 decoder stage."""

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
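
    # Worked example (illustrative numbers): with num_inference_steps=100 and
    # strength=0.3,
    #   init_timestep = min(int(100 * 0.3), 100) = 30
    #   t_start       = max(100 - 30, 0)         = 70
    # so timesteps[70:] are run, i.e. 30 denoising steps on the noised image.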
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        image,
        negative_image_embeds,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        strength=0.3,
        num_images_per_prompt=1,
        generator=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 29 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_vit': ['VIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTConfig', 'ViTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_vit'] = ['ViTFeatureExtractor']
    _import_structure['image_processing_vit'] = ['ViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit'] = [
'VIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTForImageClassification',
'ViTForMaskedImageModeling',
'ViTModel',
'ViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vit'] = [
'TFViTForImageClassification',
'TFViTModel',
'TFViTPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vit'] = [
'FlaxViTForImageClassification',
'FlaxViTModel',
'FlaxViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)

| 145 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    """Read a yes/no environment flag, falling back to `default` when unset."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
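

# Illustrative: `strtobool` accepts "y", "yes", "t", "true", "on", "1" (and the
# negative spellings), returning 1/0. A quick sketch with a made-up flag name:
if __name__ == "__main__":
    os.environ["RUN_DEMO"] = "yes"
    assert parse_flag_from_env("RUN_DEMO") == 1
    os.environ["RUN_DEMO"] = "no"
    assert parse_flag_from_env("RUN_DEMO") == 0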
def skip(test_case):
    """Decorator that skips a test unconditionally."""
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    """Decorator marking a test as slow; skipped unless RUN_SLOW=1 is set."""
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    """Keeps one temporary directory open for the whole class and wipes its
    contents between tests when `clear_on_setup` is True."""

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    """Gathers `tensor` across processes and checks every copy is identical."""
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result
class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
| 29 | 0 |
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
"""User-Agent""": """Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"""
""" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"""
}
def __lowerCAmelCase ( lowercase : str = "dhaka" , lowercase : int = 5 ) -> List[str]:
"""simple docstring"""
snake_case : Optional[int] = min(__snake_case , 50 ) # Prevent abuse!
snake_case : Dict = {
'q': query,
'tbm': 'isch',
'hl': 'en',
'ijn': '0',
}
snake_case : Any = requests.get("https://www.google.com/search" , params=__snake_case , headers=__snake_case )
snake_case : Dict = BeautifulSoup(html.text , "html.parser" )
snake_case : Any = ''.join(
re.findall(R"AF_initDataCallback\(([^<]+)\);" , str(soup.select("script" ) ) ) )
snake_case : int = json.dumps(__snake_case )
snake_case : List[Any] = json.loads(__snake_case )
snake_case : Union[str, Any] = re.findall(
R"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\"," , __snake_case , )
if not matched_google_image_data:
return 0
snake_case : Union[str, Any] = re.sub(
R"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]" , "" , str(__snake_case ) , )
snake_case : Optional[int] = re.findall(
R"(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]" , __snake_case , )
for index, fixed_full_res_image in enumerate(__snake_case ):
if index >= max_images:
return index
snake_case : Optional[int] = bytes(__snake_case , "ascii" ).decode(
"unicode-escape" )
snake_case : Union[str, Any] = bytes(__snake_case , "ascii" ).decode(
"unicode-escape" )
snake_case : Union[str, Any] = urllib.request.build_opener()
snake_case : Dict = [
(
'User-Agent',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
)
]
urllib.request.install_opener(__snake_case )
snake_case : Union[str, Any] = F'query_{query.replace(" " , "_" )}'
if not os.path.exists(__snake_case ):
os.makedirs(__snake_case )
urllib.request.urlretrieve( # noqa: S310
__snake_case , F'{path_name}/original_size_img_{index}.jpg' )
return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(F'''{image_count} images were downloaded to disk.''')
except IndexError:
print("""Please provide a search term.""")
raise
| 203 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Builds a pair of DataLoaders over noisy samples of y = a*x + b."""

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Trains for `num_epochs` and returns the per-step random draws used in reproducibility checks."""
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    """Simple model computing y = a*x + b."""

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase_ : Optional[int] = ProjectConfiguration(total_limit=1 , project_dir=_UpperCamelCase , automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : Dict = Accelerator(project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Optional[Any] = DummyModel()
UpperCAmelCase_ : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders()
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
UpperCAmelCase_ : Any = os.path.join(_UpperCamelCase , 'initial' )
accelerator.save_state(_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
UpperCAmelCase_ : Union[str, Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Union[str, Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Any = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : str = dummy_dataloaders()
UpperCAmelCase_ : Optional[Any] = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.load_state(_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[str] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Dict = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save everything
UpperCAmelCase_ : Union[str, Any] = os.path.join(_UpperCamelCase , 'checkpoint' )
accelerator.save_state(_UpperCamelCase )
# Load everything back in and make sure all states work
accelerator.load_state(_UpperCamelCase )
test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dummy_dataloaders()
UpperCAmelCase_ : Any = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : str = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
UpperCAmelCase_ : Optional[Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Tuple = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase_ : Any = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase_ : Tuple = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) )
((UpperCAmelCase_) , (UpperCAmelCase_)) : str = model.a.item(), model.b.item()
UpperCAmelCase_ : List[Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_1' ) )
test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : Optional[Any] = torch.tensor([1, 2, 3] )
UpperCAmelCase_ : Any = torch.tensor([2, 3, 4] )
UpperCAmelCase_ : Union[str, Any] = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(net.parameters() )
UpperCAmelCase_ : Any = Accelerator()
with self.assertRaises(_UpperCamelCase ) as ve:
accelerator.register_for_checkpointing(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[int] = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ : Dict = torch.optim.lr_scheduler.StepLR(_UpperCamelCase , step_size=1 , gamma=0.99 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders()
UpperCAmelCase_ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
UpperCAmelCase_ : Dict = scheduler.state_dict()
train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
self.assertNotEqual(_UpperCamelCase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) )
self.assertEqual(_UpperCamelCase , scheduler.state_dict() )
def __UpperCAmelCase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Optional[int] = DummyModel()
UpperCAmelCase_ : Dict = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase , total_limit=2 )
# Train baseline
UpperCAmelCase_ : Optional[int] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ : str = accelerator.prepare(_UpperCamelCase )
# Save 3 states:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : List[str] = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
if __name__ == "__main__":
__UpperCAmelCase = '/tmp/accelerate/state_checkpointing'
__UpperCAmelCase = DummyModel()
__UpperCAmelCase = torch.optim.Adam(params=model.parameters(), lr=1E-3)
__UpperCAmelCase = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
__UpperCAmelCase , __UpperCAmelCase = dummy_dataloaders()
__UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__UpperCAmelCase = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
__UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert param_device.type == accelerator.device.type
__UpperCAmelCase = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 29 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
"PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
"PegasusXForConditionalGeneration",
"PegasusXModel",
"PegasusXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
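

# Illustrative usage (assumes transformers is installed and this file lives at
# transformers/models/pegasus_x/__init__.py): attribute access resolves lazily,
# so importing the config alone does not pull in the torch modeling code.
if __name__ == "__main__":
    from transformers.models.pegasus_x import PegasusXConfig

    print(PegasusXConfig().model_type)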
| 90 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 29 | 0 |
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    """Project Euler 72: count reduced proper fractions n/d with d <= limit via Euler's totient."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
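

# Sanity check from the Project Euler 72 statement: for d <= 8 there are
# exactly 21 reduced proper fractions.
assert solution(8) == 21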
if __name__ == "__main__":
print(f"""{solution() = }""")
| 155 |
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
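

# Minimal usage sketch with an assumed singly-linked node type: the functions
# above only rely on nodes exposing `.val` and `.next`; `Node` and `build` are
# ours, added for illustration.
class Node:
    def __init__(self, val):
        self.val = val
        self.next = None


def build(values):
    head = None
    for v in reversed(values):
        node = Node(v)
        node.next = head
        head = node
    return head


if __name__ == "__main__":
    assert is_palindrome(build([1, 2, 2, 1]))
    assert is_palindrome_stack(build([1, 2, 3, 2, 1]))
    assert not is_palindrome_dict(build([1, 2, 3]))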
| 29 | 0 |
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps

| 74 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29 | 0 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_A : Any = get_tests_dir('fixtures')
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCamelCase ( self : Tuple ) ->int:
# A mock response for an HTTP head request to emulate server down
lowerCamelCase__ : Optional[int] = mock.Mock()
lowerCamelCase__ : Any = 5_0_0
lowerCamelCase__ : str = {}
lowerCamelCase__ : Any = HTTPError
lowerCamelCase__ : List[str] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=_UpperCamelCase ) as mock_head:
lowerCamelCase__ : Tuple = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# This check we did call the fake head request
mock_head.assert_called()
def __lowerCamelCase ( self : Dict ) ->Tuple:
# This test is for deprecated behavior and can be removed in v5
lowerCamelCase__ : int = WavaVecaFeatureExtractor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@classmethod
def __lowerCamelCase ( cls : Optional[Any] ) ->Tuple:
lowerCamelCase__ : Any = TOKEN
HfFolder.save_token(_UpperCamelCase )
@classmethod
def __lowerCamelCase ( cls : List[str] ) ->Optional[int]:
try:
delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
except HTTPError:
pass
def __lowerCamelCase ( self : Any ) ->Optional[Any]:
lowerCamelCase__ : Any = WavaVecaFeatureExtractor.from_pretrained(_UpperCamelCase )
feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token )
lowerCamelCase__ : List[Any] = WavaVecaFeatureExtractor.from_pretrained(F"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_UpperCamelCase , repo_id='''test-feature-extractor''' , push_to_hub=_UpperCamelCase , use_auth_token=self._token )
lowerCamelCase__ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(F"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
def __lowerCamelCase ( self : Union[str, Any] ) ->Optional[int]:
lowerCamelCase__ : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(_UpperCamelCase )
feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token )
lowerCamelCase__ : Dict = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_UpperCamelCase , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=_UpperCamelCase , use_auth_token=self._token )
lowerCamelCase__ : List[str] = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
def __lowerCamelCase ( self : Tuple ) ->int:
CustomFeatureExtractor.register_for_auto_class()
lowerCamelCase__ : Optional[int] = CustomFeatureExtractor.from_pretrained(_UpperCamelCase )
feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , )
lowerCamelCase__ : List[Any] = AutoFeatureExtractor.from_pretrained(
F"{USER}/test-dynamic-feature-extractor" , trust_remote_code=_UpperCamelCase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
| 142 |
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
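

# Illustrative: the table maps pip package names to full version specifiers and
# is typically consumed when generating setup extras or runtime version checks.
if __name__ == "__main__":
    print(deps["torch"])       # torch>=1.9,!=1.12.0
    print(deps["tokenizers"])  # tokenizers>=0.11.1,!=0.11.3,<0.14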
| 29 | 0 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class lowerCamelCase__ ( _snake_case):
SCREAMING_SNAKE_CASE__ = '''xlnet'''
SCREAMING_SNAKE_CASE__ = ['''mems''']
SCREAMING_SNAKE_CASE__ = {
'''n_token''': '''vocab_size''', # Backward compatibility
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__(self , UpperCAmelCase=3_2_0_0_0 , UpperCAmelCase=1_0_2_4 , UpperCAmelCase=2_4 , UpperCAmelCase=1_6 , UpperCAmelCase=4_0_9_6 , UpperCAmelCase="gelu" , UpperCAmelCase=True , UpperCAmelCase="bi" , UpperCAmelCase=0.02 , UpperCAmelCase=1e-12 , UpperCAmelCase=0.1 , UpperCAmelCase=5_1_2 , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=-1 , UpperCAmelCase=False , UpperCAmelCase="last" , UpperCAmelCase=True , UpperCAmelCase="tanh" , UpperCAmelCase=0.1 , UpperCAmelCase=5 , UpperCAmelCase=5 , UpperCAmelCase=5 , UpperCAmelCase=1 , UpperCAmelCase=2 , **UpperCAmelCase , ) -> Any:
_lowercase =vocab_size
_lowercase =d_model
_lowercase =n_layer
_lowercase =n_head
if d_model % n_head != 0:
raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0" )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
_lowercase =d_model // n_head
_lowercase =ff_activation
_lowercase =d_inner
_lowercase =untie_r
_lowercase =attn_type
_lowercase =initializer_range
_lowercase =layer_norm_eps
_lowercase =dropout
_lowercase =mem_len
_lowercase =reuse_len
_lowercase =bi_data
_lowercase =clamp_len
_lowercase =same_length
_lowercase =summary_type
_lowercase =summary_use_proj
_lowercase =summary_activation
_lowercase =summary_last_dropout
_lowercase =start_n_top
_lowercase =end_n_top
_lowercase =bos_token_id
_lowercase =pad_token_id
_lowercase =eos_token_id
if "use_cache" in kwargs:
warnings.warn(
'''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'''
''' instead.''' , _UpperCamelCase , )
_lowercase =kwargs['use_cache']
_lowercase =use_mems_eval
_lowercase =use_mems_train
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
    @property
    def max_position_embeddings(self ) -> Tuple:
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit." )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings(self , UpperCAmelCase ) -> Optional[int]:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 5 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : "DiagonalGaussianDistribution"
class lowerCamelCase (_snake_case , _snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = True
@register_to_config
def __init__( self , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = ("DownEncoderBlock2D",) , _UpperCamelCase = ("UpDecoderBlock2D",) , _UpperCamelCase = (6_4,) , _UpperCamelCase = 1 , _UpperCamelCase = "silu" , _UpperCamelCase = 4 , _UpperCamelCase = 3_2 , _UpperCamelCase = 3_2 , _UpperCamelCase = 0.1_82_15 , ) -> List[Any]:
super().__init__()
# pass init params to Encoder
UpperCAmelCase_ : List[str] = Encoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , down_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , act_fn=_UpperCamelCase , norm_num_groups=_UpperCamelCase , double_z=_UpperCamelCase , )
# pass init params to Decoder
UpperCAmelCase_ : Dict = Decoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , up_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , norm_num_groups=_UpperCamelCase , act_fn=_UpperCamelCase , )
        self.quant_conv = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
        self.post_quant_conv = nn.Conv2d(latent_channels , latent_channels , 1 )
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : int = False
# only relevant if vae tiling is enabled
UpperCAmelCase_ : Optional[int] = self.config.sample_size
UpperCAmelCase_ : int = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
UpperCAmelCase_ : Union[str, Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
UpperCAmelCase_ : Optional[Any] = 0.25
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False ) -> List[str]:
if isinstance(_UpperCamelCase , (Encoder, Decoder) ):
UpperCAmelCase_ : Union[str, Any] = value
    def enable_tiling( self , use_tiling = True ) -> int:
        self.use_tiling = use_tiling
    def disable_tiling( self ) -> Dict:
        self.enable_tiling(False )
    def enable_slicing( self ) -> Optional[Any]:
        self.use_slicing = True
    def disable_slicing( self ) -> List[Any]:
        self.use_slicing = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __UpperCAmelCase ( self ) -> Dict[str, AttentionProcessor]:
UpperCAmelCase_ : Optional[int] = {}
def fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
UpperCAmelCase_ : Optional[int] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return processors
def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = len(self.attn_processors.keys() )
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(_UpperCamelCase )} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
module.set_processor(_UpperCamelCase )
else:
module.set_processor(processor.pop(f"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_UpperCamelCase , return_dict=_UpperCamelCase )
if self.use_slicing and x.shape[0] > 1:
            UpperCAmelCase_ : Union[str, Any] = [self.encoder(x_slice ) for x_slice in x.split(1 )]
UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : List[Any] = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self.quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_UpperCamelCase , return_dict=_UpperCamelCase )
UpperCAmelCase_ : str = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.decoder(_UpperCamelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
UpperCAmelCase_ : List[str] = [self._decode(_UpperCamelCase ).sample for z_slice in z.split(1 )]
UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : Any = self._decode(_UpperCamelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_UpperCamelCase )
    def blend_v( self , a , b , blend_extent ) -> Any:
        blend_extent = min(a.shape[2] , b.shape[2] , blend_extent )
        for y in range(blend_extent ):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b
    def blend_h( self , a , b , blend_extent ) -> Dict:
        blend_extent = min(a.shape[3] , b.shape[3] , blend_extent )
        for x in range(blend_extent ):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
UpperCAmelCase_ : Any = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Tuple = int(self.tile_latent_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Optional[int] = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
UpperCAmelCase_ : List[str] = []
for i in range(0 , x.shape[2] , _UpperCamelCase ):
UpperCAmelCase_ : Any = []
for j in range(0 , x.shape[3] , _UpperCamelCase ):
UpperCAmelCase_ : Any = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
UpperCAmelCase_ : Dict = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.quant_conv(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
UpperCAmelCase_ : str = []
for i, row in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[Any] = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Dict = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase )
if j > 0:
UpperCAmelCase_ : List[str] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) )
UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=2 )
UpperCAmelCase_ : List[Any] = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : str = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Dict = int(self.tile_sample_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Dict = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
UpperCAmelCase_ : Union[str, Any] = []
for i in range(0 , z.shape[2] , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = []
for j in range(0 , z.shape[3] , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
UpperCAmelCase_ : Optional[Any] = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = self.decoder(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = []
for i, row in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[Any] = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Union[str, Any] = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase )
if j > 0:
UpperCAmelCase_ : Optional[Any] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) )
UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : Optional[Any] = sample
UpperCAmelCase_ : Union[str, Any] = self.encode(_UpperCamelCase ).latent_dist
if sample_posterior:
UpperCAmelCase_ : str = posterior.sample(generator=_UpperCamelCase )
else:
UpperCAmelCase_ : int = posterior.mode()
UpperCAmelCase_ : Dict = self.decode(_UpperCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
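# A standalone numpy sketch of the tile-blending idea used by blend_v/blend_h above:
# across the first `blend_extent` rows, the bottom edge of tile `a` is linearly
# cross-faded into the top edge of tile `b`, hiding seams between decoded tiles.
# Shapes and values are illustrative only.
import numpy as np

def blend_v_demo(a: np.ndarray, b: np.ndarray, blend_extent: int) -> np.ndarray:
    blend_extent = min(a.shape[2], b.shape[2], blend_extent)
    for y in range(blend_extent):
        # weight moves from 1 -> 0 for a and from 0 -> 1 for b as y grows
        b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
    return b

a = np.zeros((1, 1, 4, 2))   # tile above, all zeros
b = np.ones((1, 1, 4, 2))    # tile below, all ones
out = blend_v_demo(a, b, blend_extent=4)
print(out[0, 0, :, 0])       # [0.   0.25 0.5  0.75]: a smooth ramp instead of a hard seam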
| 29 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
UpperCAmelCase : int = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    """simple docstring"""

    def __init__( self , load_in_8bit=False , load_in_4bit=False , llm_int8_threshold=6.0 , llm_int8_skip_modules=None , llm_int8_enable_fp32_cpu_offload=False , llm_int8_has_fp16_weight=False , bnb_4bit_compute_dtype=None , bnb_4bit_quant_type="fp4" , bnb_4bit_use_double_quant=False , **kwargs , ) -> Optional[int]:
        '''simple docstring'''
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant
        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype , str ):
            self.bnb_4bit_compute_dtype = getattr(torch , bnb_4bit_compute_dtype )
        elif isinstance(bnb_4bit_compute_dtype , torch.dtype ):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("""bnb_4bit_compute_dtype must be a string or a torch.dtype""" )
        self.post_init()
    def post_init( self ) -> int:
        '''simple docstring'''
        if not isinstance(self.llm_int8_threshold , float ):
            raise ValueError("""llm_int8_threshold must be a float""" )
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules , list ):
            raise ValueError("""llm_int8_skip_modules must be a list of strings""" )
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload , bool ):
            raise ValueError("""llm_int8_enable_fp32_cpu_offload must be a boolean""" )
        if not isinstance(self.llm_int8_has_fp16_weight , bool ):
            raise ValueError("""llm_int8_has_fp16_weight must be a boolean""" )
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype , torch.dtype ):
            raise ValueError("""bnb_4bit_compute_dtype must be torch.dtype""" )
        if not isinstance(self.bnb_4bit_quant_type , str ):
            raise ValueError("""bnb_4bit_quant_type must be a string""" )
        if not isinstance(self.bnb_4bit_use_double_quant , bool ):
            raise ValueError("""bnb_4bit_use_double_quant must be a boolean""" )
        if self.load_in_4bit and not version.parse(importlib.metadata.version("""bitsandbytes""" ) ) >= version.parse(
            """0.39.0""" ):
            raise ValueError(
                """4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version""" )
    def is_quantizable( self ) -> str:
        '''simple docstring'''
        return self.load_in_8bit or self.load_in_4bit
    def quantization_method( self ) -> List[str]:
        '''simple docstring'''
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None
    @classmethod
    def from_dict( cls , config_dict , return_unused_kwargs , **kwargs ) -> Tuple:
        '''simple docstring'''
        config = cls(**config_dict )
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config , key ):
                setattr(config , key , value )
                to_remove.append(key )
        for key in to_remove:
            kwargs.pop(key , None )
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config
    def to_json_file( self , json_file_path ) -> int:
        '''simple docstring'''
        with open(json_file_path , """w""" , encoding="""utf-8""" ) as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict , indent=2 , sort_keys=True ) + "\n"
            writer.write(json_string )
    def to_dict( self ) -> Dict[str, Any]:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["""bnb_4bit_compute_dtype"""] = str(output["""bnb_4bit_compute_dtype"""] ).split(""".""" )[1]
        return output
    def __repr__( self ) -> Optional[Any]:
        '''simple docstring'''
        return F'{self.__class__.__name__} {self.to_json_string()}'
    def to_json_string( self , use_diff = True ) -> str:
        '''simple docstring'''
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict , indent=2 , sort_keys=True ) + "\n"
    def to_diff_dict( self ) -> Dict[str, Any]:
        '''simple docstring'''
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
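# A short usage sketch for the config class above (assumes torch is importable;
# constructing with load_in_8bit avoids the bitsandbytes version check that the
# 4-bit path runs in post_init):
cfg = BitsAndBytesConfig(load_in_8bit=True)
print(cfg.is_quantizable())        # True
print(cfg.quantization_method())   # "llm_int8"
print(cfg.to_diff_dict())          # {'load_in_8bit': True}: only non-default fields survive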
| 252 |
def binary_and( a : int , b : int ):
    '''simple docstring'''
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == '1' and char_b == '1' ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
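# Quick illustration of binary_and above: both operands are zero-padded to the same
# width before the per-character comparison, so the result keeps the width of the
# longer input. Values are arbitrary examples.
print(binary_and(25, 32))   # 0b000000  (11001 & 100000 share no set bits)
print(binary_and(25, 19))   # 0b10001   (11001 & 10011)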
| 29 | 0 |
'''simple docstring'''
from __future__ import annotations
def get_valid_pos( position : tuple[int, int] , n : int ) -> Union[str, Any]:
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position )
    return permissible_positions
def is_complete( board : list[list[int]] ) -> Optional[Any]:
    return not any(elem == 0 for row in board for elem in row )
def open_knight_tour_helper( board : list[list[int]] , pos : tuple[int, int] , curr : int ) -> Tuple:
    if is_complete(board ):
        return True
    for position in get_valid_pos(pos , len(board ) ):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board , position , curr + 1 ):
                return True
            board[y][x] = 0
    return False
def open_knight_tour( n : int ) -> Dict:
    board = [[0 for i in range(n )] for j in range(n )]
    for i in range(n ):
        for j in range(n ):
            board[i][j] = 1
            if open_knight_tour_helper(board , (i, j) , 1 ):
                return board
            board[i][j] = 0
    msg = F"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg )
if __name__ == "__main__":
import doctest
doctest.testmod()
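# Small usage sketch for the solver above: a 5x5 board admits an open knight's tour,
# while some sizes (e.g. 4) do not and raise ValueError. The naive backtracking grows
# quickly with n, so keep boards small when experimenting.
board = open_knight_tour(5)
for row in board:
    print(row)   # each cell holds the step (1..25) at which the knight visits it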
| 276 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'vocab.txt'}
__UpperCAmelCase = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__UpperCAmelCase = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__UpperCAmelCase = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = VOCAB_FILES_NAMES
_snake_case : int = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Dict = PRETRAINED_INIT_CONFIGURATION
_snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Any = ConvBertTokenizer
def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase="[UNK]" , _UpperCamelCase="[SEP]" , _UpperCamelCase="[PAD]" , _UpperCamelCase="[CLS]" , _UpperCamelCase="[MASK]" , _UpperCamelCase=True , _UpperCamelCase=None , **_UpperCamelCase , ) -> Dict:
super().__init__(
_UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , )
UpperCAmelCase_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _UpperCamelCase ) != tokenize_chinese_chars
):
UpperCAmelCase_ : Any = getattr(_UpperCamelCase , normalizer_state.pop('type' ) )
UpperCAmelCase_ : str = do_lower_case
UpperCAmelCase_ : List[Any] = strip_accents
UpperCAmelCase_ : str = tokenize_chinese_chars
UpperCAmelCase_ : Tuple = normalizer_class(**_UpperCamelCase )
UpperCAmelCase_ : Any = do_lower_case
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=None ) -> List[str]:
UpperCAmelCase_ : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]:
UpperCAmelCase_ : Union[str, Any] = [self.sep_token_id]
UpperCAmelCase_ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]:
UpperCAmelCase_ : Any = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
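# Illustrative sketch of the special-token layout the two methods above produce
# (token ids are hypothetical): a single sequence becomes [CLS] A [SEP]; a pair
# becomes [CLS] A [SEP] B [SEP], and token_type_ids mark the second segment with 1s.
cls_id, sep_id = 101, 102            # assumed ids, for illustration only
token_ids_a = [7592, 2088]           # hypothetical ids for "hello world"
token_ids_b = [2129, 2024, 2017]     # hypothetical ids for "how are you"
single = [cls_id] + token_ids_a + [sep_id]
pair = single + token_ids_b + [sep_id]
type_ids = [0] * len(single) + [1] * (len(token_ids_b) + 1)
print(pair)        # [101, 7592, 2088, 102, 2129, 2024, 2017, 102]
print(type_ids)    # [0, 0, 0, 0, 1, 1, 1, 1]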
| 29 | 0 |
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
@parameterized.expand([(None,), ('''foo.json''',)] )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Tuple ):
__lowercase = GenerationConfig(
do_sample=_UpperCamelCase ,temperature=0.7 ,length_penalty=1.0 ,bad_words_ids=[[1, 2, 3], [4, 5]] ,)
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_UpperCamelCase ,config_name=_UpperCamelCase )
__lowercase = GenerationConfig.from_pretrained(_UpperCamelCase ,config_name=_UpperCamelCase )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample ,_UpperCamelCase )
self.assertEqual(loaded_config.temperature ,0.7 )
self.assertEqual(loaded_config.length_penalty ,1.0 )
self.assertEqual(loaded_config.bad_words_ids ,[[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k ,5_0 )
self.assertEqual(loaded_config.max_length ,2_0 )
self.assertEqual(loaded_config.max_time ,_UpperCamelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = AutoConfig.from_pretrained('''gpt2''' )
__lowercase = GenerationConfig.from_model_config(_UpperCamelCase )
__lowercase = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(_UpperCamelCase ,_UpperCamelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id ,default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id ,model_config.eos_token_id )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = GenerationConfig()
__lowercase = {
'max_new_tokens': 1_0_2_4,
'foo': 'bar',
}
__lowercase = copy.deepcopy(_UpperCamelCase )
__lowercase = generation_config.update(**_UpperCamelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(_UpperCamelCase ,_UpperCamelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens ,1_0_2_4 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(_UpperCamelCase ,{'''foo''': '''bar'''} )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = GenerationConfig()
__lowercase = 'bar'
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(_UpperCamelCase )
__lowercase = GenerationConfig.from_pretrained(_UpperCamelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo ,'''bar''' )
__lowercase = GenerationConfig.from_model_config(_UpperCamelCase )
assert not hasattr(_UpperCamelCase ,'''foo''' ) # no new kwargs should be initialized if from config
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = GenerationConfig()
self.assertEqual(default_config.temperature ,1.0 )
self.assertEqual(default_config.do_sample ,_UpperCamelCase )
self.assertEqual(default_config.num_beams ,1 )
__lowercase = GenerationConfig(
do_sample=_UpperCamelCase ,temperature=0.7 ,length_penalty=1.0 ,bad_words_ids=[[1, 2, 3], [4, 5]] ,)
self.assertEqual(config.temperature ,0.7 )
self.assertEqual(config.do_sample ,_UpperCamelCase )
self.assertEqual(config.num_beams ,1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_UpperCamelCase )
__lowercase = GenerationConfig.from_pretrained(_UpperCamelCase ,temperature=1.0 )
self.assertEqual(loaded_config.temperature ,1.0 )
self.assertEqual(loaded_config.do_sample ,_UpperCamelCase )
self.assertEqual(loaded_config.num_beams ,1 ) # default value
@is_staging_test
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Tuple ):
__lowercase = TOKEN
HfFolder.save_token(_UpperCamelCase )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ):
try:
delete_repo(token=cls._token ,repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = GenerationConfig(
do_sample=_UpperCamelCase ,temperature=0.7 ,length_penalty=1.0 ,)
config.push_to_hub('''test-generation-config''' ,use_auth_token=self._token )
__lowercase = GenerationConfig.from_pretrained(F"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_UpperCamelCase ,getattr(_UpperCamelCase ,_UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token ,repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_UpperCamelCase ,repo_id='''test-generation-config''' ,push_to_hub=_UpperCamelCase ,use_auth_token=self._token )
__lowercase = GenerationConfig.from_pretrained(F"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_UpperCamelCase ,getattr(_UpperCamelCase ,_UpperCamelCase ) )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = GenerationConfig(
do_sample=_UpperCamelCase ,temperature=0.7 ,length_penalty=1.0 ,)
config.push_to_hub('''valid_org/test-generation-config-org''' ,use_auth_token=self._token )
__lowercase = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_UpperCamelCase ,getattr(_UpperCamelCase ,_UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token ,repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_UpperCamelCase ,repo_id='''valid_org/test-generation-config-org''' ,push_to_hub=_UpperCamelCase ,use_auth_token=self._token )
__lowercase = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_UpperCamelCase ,getattr(_UpperCamelCase ,_UpperCamelCase ) )
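# A dependency-free sketch of the update() contract the tests above exercise: known
# attributes are applied in place and unknown keys are returned to the caller.
# MiniConfig is a stand-in, not the real GenerationConfig.
class MiniConfig:
    def __init__(self):
        self.max_new_tokens = 20
    def update(self, **kwargs):
        unused = {}
        for key, value in kwargs.items():
            if hasattr(self, key):
                setattr(self, key, value)   # valid attribute: apply in place
            else:
                unused[key] = value         # unknown key: hand back to caller
        return unused

_cfg = MiniConfig()
_leftover = _cfg.update(max_new_tokens=1024, foo="bar")
print(_cfg.max_new_tokens, _leftover)   # 1024 {'foo': 'bar'}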
| 104 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = '''efficientformer'''
def __init__( self , _UpperCamelCase = [3, 2, 6, 4] , _UpperCamelCase = [4_8, 9_6, 2_2_4, 4_4_8] , _UpperCamelCase = [True, True, True, True] , _UpperCamelCase = 4_4_8 , _UpperCamelCase = 3_2 , _UpperCamelCase = 4 , _UpperCamelCase = 7 , _UpperCamelCase = 5 , _UpperCamelCase = 8 , _UpperCamelCase = 4 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1_6 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 2 , _UpperCamelCase = 1 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1 , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1E-5 , _UpperCamelCase = "gelu" , _UpperCamelCase = 0.02 , _UpperCamelCase = 1E-12 , _UpperCamelCase = 2_2_4 , _UpperCamelCase = 1E-05 , **_UpperCamelCase , ) -> None:
super().__init__(**_UpperCamelCase )
UpperCAmelCase_ : int = hidden_act
UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase_ : Tuple = hidden_sizes
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : List[str] = num_attention_heads
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : List[str] = patch_size
UpperCAmelCase_ : Union[str, Any] = num_channels
UpperCAmelCase_ : Optional[Any] = depths
UpperCAmelCase_ : List[Any] = mlp_expansion_ratio
UpperCAmelCase_ : List[str] = downsamples
UpperCAmelCase_ : List[Any] = dim
UpperCAmelCase_ : Tuple = key_dim
UpperCAmelCase_ : Optional[int] = attention_ratio
UpperCAmelCase_ : str = resolution
UpperCAmelCase_ : Dict = pool_size
UpperCAmelCase_ : Union[str, Any] = downsample_patch_size
UpperCAmelCase_ : List[str] = downsample_stride
UpperCAmelCase_ : List[str] = downsample_pad
UpperCAmelCase_ : Any = drop_path_rate
UpperCAmelCase_ : Dict = num_metaad_blocks
UpperCAmelCase_ : Dict = distillation
UpperCAmelCase_ : int = use_layer_scale
UpperCAmelCase_ : Any = layer_scale_init_value
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : Dict = batch_norm_eps
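# Illustrative consistency check for the stage-wise settings above: depths,
# hidden_sizes and downsamples are per-stage lists and must line up. Plain Python,
# values copied from the defaults in the signature above.
depths = [3, 2, 6, 4]
hidden_sizes = [48, 96, 224, 448]
downsamples = [True, True, True, True]
assert len(depths) == len(hidden_sizes) == len(downsamples)    # one entry per stage
print(f"{len(depths)} stages, {sum(depths)} blocks in total")  # 4 stages, 15 blocks in total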
| 29 | 0 |
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def UpperCamelCase_( snake_case : np.ndarray ):
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def UpperCamelCase_( snake_case : np.ndarray , snake_case : np.ndarray , snake_case : int ):
'''simple docstring'''
snake_case_ = np.nan
for i in range(__snake_case ):
snake_case_ = features[:, labels == i]
snake_case_ = data.mean(1 )
# Centralize the data of class i
snake_case_ = data - column_reshape(__snake_case )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(__snake_case , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
snake_case_ = np.dot(__snake_case , centered_data.T )
return covariance_sum / features.shape[1]
def UpperCamelCase_( snake_case : np.ndarray , snake_case : np.ndarray , snake_case : int ):
'''simple docstring'''
snake_case_ = features.mean(1 )
snake_case_ = np.nan
for i in range(__snake_case ):
snake_case_ = features[:, labels == i]
snake_case_ = data.shape[1]
snake_case_ = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(__snake_case ) - column_reshape(__snake_case ) , (column_reshape(__snake_case ) - column_reshape(__snake_case )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
snake_case_ = device_data * np.dot(
column_reshape(__snake_case ) - column_reshape(__snake_case ) , (column_reshape(__snake_case ) - column_reshape(__snake_case )).T , )
return covariance_sum / features.shape[1]
def UpperCamelCase_( snake_case : np.ndarray , snake_case : int ):
'''simple docstring'''
if features.any():
snake_case_ = features.mean(1 )
# Center the dataset
snake_case_ = features - np.reshape(__snake_case , (data_mean.size, 1) )
snake_case_ = np.dot(__snake_case , centered_data.T ) / features.shape[1]
snake_case_ = np.linalg.eigh(__snake_case )
# Take all the columns in the reverse order (-1), and then takes only the first
snake_case_ = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
snake_case_ = np.dot(filtered_eigenvectors.T , __snake_case )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=__snake_case )
logging.error("Dataset empty" )
raise AssertionError
def UpperCamelCase_( snake_case : np.ndarray , snake_case : np.ndarray , snake_case : int , snake_case : int ):
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
snake_case_ = eigh(
covariance_between_classes(__snake_case , __snake_case , __snake_case ) , covariance_within_classes(__snake_case , __snake_case , __snake_case ) , )
snake_case_ = eigenvectors[:, ::-1][:, :dimensions]
snake_case_ = np.linalg.svd(__snake_case )
snake_case_ = svd_matrix[:, 0:dimensions]
snake_case_ = np.dot(filtered_svd_matrix.T , __snake_case )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=__snake_case )
logging.error("Dataset empty" )
raise AssertionError
def UpperCamelCase_( ):
'''simple docstring'''
snake_case_ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
snake_case_ = np.array([0, 0, 0, 1, 1] )
snake_case_ = 2
snake_case_ = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(__snake_case ) as error_info:
snake_case_ = linear_discriminant_analysis(
__snake_case , __snake_case , __snake_case , __snake_case )
if isinstance(__snake_case , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def UpperCamelCase_( ):
'''simple docstring'''
snake_case_ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
snake_case_ = 2
snake_case_ = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
with pytest.raises(__snake_case ) as error_info:
snake_case_ = principal_component_analysis(__snake_case , __snake_case )
if not np.allclose(__snake_case , __snake_case ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
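# A compact, self-contained restatement of the PCA path above on a tiny dataset:
# center the (features x samples) matrix, eigendecompose its covariance, and project
# onto the leading eigenvector. Numbers are illustrative only.
import numpy as np

features = np.array([[1.0, 2.0, 3.0, 4.0],
                     [2.0, 4.0, 6.0, 8.0]])            # 2 features, 4 samples
centered = features - features.mean(1, keepdims=True)
cov = centered @ centered.T / features.shape[1]
eigvals, eigvecs = np.linalg.eigh(cov)                 # eigenvalues in ascending order
top = eigvecs[:, ::-1][:, :1]                          # leading principal direction
projected = top.T @ centered                           # 1 x 4 projection of the data
print(projected.round(3))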
| 85 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Union[PIL.Image.Image, np.ndarray]
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Any:
super().__init__()
self.register_modules(
prior=_UpperCamelCase , image_encoder=_UpperCamelCase , image_processor=_UpperCamelCase , scheduler=_UpperCamelCase , renderer=_UpperCamelCase , )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
if latents is None:
UpperCAmelCase_ : str = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
UpperCAmelCase_ : Tuple = latents.to(_UpperCamelCase )
UpperCAmelCase_ : Tuple = latents * scheduler.init_noise_sigma
return latents
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase_ : int = torch.device(f"cuda:{gpu_id}" )
UpperCAmelCase_ : int = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCamelCase , _UpperCamelCase )
@property
def __UpperCAmelCase ( self ) -> int:
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_UpperCamelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> str:
if isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , torch.Tensor ):
UpperCAmelCase_ : int = torch.cat(_UpperCamelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(_UpperCamelCase , axis=0 )
if not isinstance(_UpperCamelCase , torch.Tensor ):
UpperCAmelCase_ : Optional[int] = self.image_processor(_UpperCamelCase , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
UpperCAmelCase_ : Tuple = image.to(dtype=self.image_encoder.dtype , device=_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self.image_encoder(_UpperCamelCase )['last_hidden_state']
UpperCAmelCase_ : Union[str, Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
UpperCAmelCase_ : List[str] = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : Dict = torch.zeros_like(_UpperCamelCase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase_ : Optional[int] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase = 1 , _UpperCamelCase = 2_5 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 4.0 , _UpperCamelCase = 6_4 , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> Union[str, Any]:
if isinstance(_UpperCamelCase , PIL.Image.Image ):
UpperCAmelCase_ : Tuple = 1
elif isinstance(_UpperCamelCase , torch.Tensor ):
UpperCAmelCase_ : str = image.shape[0]
elif isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
UpperCAmelCase_ : Optional[int] = len(_UpperCamelCase )
else:
raise ValueError(
f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_UpperCamelCase )}" )
UpperCAmelCase_ : Tuple = self._execution_device
UpperCAmelCase_ : str = batch_size * num_images_per_prompt
UpperCAmelCase_ : str = guidance_scale > 1.0
UpperCAmelCase_ : str = self._encode_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# prior
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
UpperCAmelCase_ : int = self.scheduler.timesteps
UpperCAmelCase_ : int = self.prior.config.num_embeddings
UpperCAmelCase_ : Any = self.prior.config.embedding_dim
UpperCAmelCase_ : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
UpperCAmelCase_ : List[Any] = latents.reshape(latents.shape[0] , _UpperCamelCase , _UpperCamelCase )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : Optional[Any] = self.scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : int = self.prior(
_UpperCamelCase , timestep=_UpperCamelCase , proj_embedding=_UpperCamelCase , ).predicted_image_embedding
# remove the variance
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 )
UpperCAmelCase_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
UpperCAmelCase_ : List[str] = self.scheduler.step(
_UpperCamelCase , timestep=_UpperCamelCase , sample=_UpperCamelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = []
for i, latent in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[str] = self.renderer.decode(
latent[None, :] , _UpperCamelCase , size=_UpperCamelCase , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , )
images.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = torch.stack(_UpperCamelCase )
if output_type not in ["np", "pil"]:
raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}" )
UpperCAmelCase_ : Dict = images.cpu().numpy()
if output_type == "pil":
UpperCAmelCase_ : List[str] = [self.numpy_to_pil(_UpperCamelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_UpperCamelCase )
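# The classifier-free guidance step from the denoising loop above, isolated on plain
# tensors: conditional and unconditional predictions come out of one batched forward
# pass and are recombined with the guidance scale. Shapes are illustrative.
import torch

noise_pred = torch.randn(2, 4)                         # batch of 2: [uncond, cond]
noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
guidance_scale = 4.0
guided = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
print(guided.shape)                                    # torch.Size([1, 4])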
| 29 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__a = logging.get_logger(__name__) # pylint: disable=invalid-name
__a = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class A__ ( _snake_case ):
"""simple docstring"""
UpperCamelCase_ : Union[PIL.Image.Image, np.ndarray]
class A__ ( _snake_case ):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any] , ) -> Any:
"""simple docstring"""
super().__init__()
self.register_modules(
prior=_UpperCamelCase , image_encoder=_UpperCamelCase , image_processor=_UpperCamelCase , scheduler=_UpperCamelCase , renderer=_UpperCamelCase , )
def _lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
if latents is None:
_UpperCAmelCase : str = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
_UpperCAmelCase : Tuple = latents.to(_UpperCamelCase )
_UpperCAmelCase : Tuple = latents * scheduler.init_noise_sigma
return latents
def _lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase__ : Optional[Any]=0 ) -> Union[str, Any]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_UpperCAmelCase : int = torch.device(F"""cuda:{gpu_id}""" )
_UpperCAmelCase : int = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCamelCase , _UpperCamelCase )
@property
def _lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder , "_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_UpperCamelCase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def _lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str , ) -> str:
"""simple docstring"""
if isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , torch.Tensor ):
_UpperCAmelCase : int = torch.cat(_UpperCamelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(_UpperCamelCase , axis=0 )
if not isinstance(_UpperCamelCase , torch.Tensor ):
_UpperCAmelCase : Optional[int] = self.image_processor(_UpperCamelCase , return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
_UpperCAmelCase : Tuple = image.to(dtype=self.image_encoder.dtype , device=_UpperCamelCase )
_UpperCAmelCase : Optional[Any] = self.image_encoder(_UpperCamelCase )['last_hidden_state']
_UpperCAmelCase : Union[str, Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_UpperCAmelCase : List[str] = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
_UpperCAmelCase : Dict = torch.zeros_like(_UpperCamelCase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_UpperCAmelCase : Optional[int] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
def __call__( self : Dict , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str = 1 , lowerCAmelCase__ : Optional[int] = 2_5 , lowerCAmelCase__ : int = None , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Dict = 4.0 , lowerCAmelCase__ : str = 6_4 , lowerCAmelCase__ : Dict = "pil" , lowerCAmelCase__ : Optional[Any] = True , ) -> Union[str, Any]:
"""simple docstring"""
if isinstance(_UpperCamelCase , PIL.Image.Image ):
_UpperCAmelCase : Tuple = 1
elif isinstance(_UpperCamelCase , torch.Tensor ):
_UpperCAmelCase : str = image.shape[0]
elif isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
_UpperCAmelCase : Optional[int] = len(_UpperCamelCase )
else:
raise ValueError(
F"""`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_UpperCamelCase )}""" )
_UpperCAmelCase : Tuple = self._execution_device
_UpperCAmelCase : str = batch_size * num_images_per_prompt
_UpperCAmelCase : str = guidance_scale > 1.0
_UpperCAmelCase : str = self._encode_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# prior
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
_UpperCAmelCase : int = self.scheduler.timesteps
_UpperCAmelCase : int = self.prior.config.num_embeddings
_UpperCAmelCase : Any = self.prior.config.embedding_dim
_UpperCAmelCase : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_UpperCAmelCase : List[Any] = latents.reshape(latents.shape[0] , _UpperCamelCase , _UpperCamelCase )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
_UpperCAmelCase : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_UpperCAmelCase : Optional[Any] = self.scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase : int = self.prior(
_UpperCamelCase , timestep=_UpperCamelCase , proj_embedding=_UpperCamelCase , ).predicted_image_embedding
# remove the variance
_UpperCAmelCase : Optional[int] = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
_UpperCAmelCase : str = noise_pred.chunk(2 )
_UpperCAmelCase : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_UpperCAmelCase : List[str] = self.scheduler.step(
_UpperCamelCase , timestep=_UpperCamelCase , sample=_UpperCamelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_UpperCamelCase )
_UpperCAmelCase : List[Any] = []
for i, latent in enumerate(_UpperCamelCase ):
_UpperCAmelCase : List[str] = self.renderer.decode(
latent[None, :] , _UpperCamelCase , size=_UpperCamelCase , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , )
images.append(_UpperCamelCase )
_UpperCAmelCase : Optional[int] = torch.stack(_UpperCamelCase )
if output_type not in ["np", "pil"]:
raise ValueError(F"""Only the output types `pil` and `np` are supported not output_type={output_type}""" )
_UpperCAmelCase : Dict = images.cpu().numpy()
if output_type == "pil":
_UpperCAmelCase : List[str] = [self.numpy_to_pil(_UpperCamelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , "final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
        return ShapEPipelineOutput(images=_UpperCamelCase )
 | 145 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase (_snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : Union[str, Any] = IFImgaImgSuperResolutionPipeline
_snake_case : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
_snake_case : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
_snake_case : List[str] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __UpperCAmelCase ( self ) -> Optional[Any]:
return self._get_superresolution_dummy_components()
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=0 ) -> Any:
if str(_UpperCamelCase ).startswith('mps' ):
UpperCAmelCase_ : List[Any] = torch.manual_seed(_UpperCamelCase )
else:
UpperCAmelCase_ : int = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
UpperCAmelCase_ : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
UpperCAmelCase_ : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCAmelCase ( self ) -> Any:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __UpperCAmelCase ( self ) -> Dict:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __UpperCAmelCase ( self ) -> str:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __UpperCAmelCase ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self._test_save_load_local()
def __UpperCAmelCase ( self ) -> Dict:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
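# The device-dependent seeding pattern used in get_dummy_inputs above, isolated: mps
# does not accept a device-bound torch.Generator, so tests fall back to the global
# manual_seed there. A sketch, not a test utility from the library.
import torch

def make_generator(device: str, seed: int = 0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)                 # global generator on mps
    return torch.Generator(device=device).manual_seed(seed)

gen = make_generator("cpu", seed=0)
print(torch.randn(2, generator=gen))                   # reproducible draw on cpu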
| 29 | 0 |
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=True, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"],
        )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"],
        )
    @require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )
    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"),
            ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"],
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])
        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])
        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])

    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])
        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)


class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
| 203 |
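# --- Illustrative usage note (added) -----------------------------------------
# A minimal sketch of how the tokenizer family exercised by the tests above is
# used in practice. The checkpoint name comes from the tests themselves;
# running this requires the Japanese tokenizer extras (e.g. fugashi/ipadic).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
print(tokenizer.tokenize("こんにちは、世界。"))   # e.g. ['こんにちは', '、', '世界', '。']
print(tokenizer.encode("こんにちは、世界。"))     # token ids with [CLS]/[SEP] added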
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_time_series_transformer'] = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29 | 0 |
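# --- Illustrative sketch (added) ----------------------------------------------
# The module above registers names in `_import_structure` and hands them to a
# lazy module, so the heavy torch imports only run on first attribute access.
# A stripped-down illustration of that idea (not the actual transformers
# implementation):
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        return getattr(module, attr)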
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Sigmoid activation; with deriv=True, `value` is treated as a sigmoid
    output s and the derivative s * (1 - s) is returned."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight so the network output approaches `expected` percent."""
    # Random starting weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, deriv=True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
| 90 |
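# --- Quick numerical check (added) ---------------------------------------------
# The training loop above uses the identity sigmoid'(x) = s * (1 - s), where s
# is the sigmoid output. A self-contained sanity check of that identity:
import math


def sigmoid_function(value, deriv=False):
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


x, h = 0.5, 1e-6
s = sigmoid_function(x)
numeric = (sigmoid_function(x + h) - sigmoid_function(x - h)) / (2 * h)
analytic = sigmoid_function(s, deriv=True)  # pass the sigmoid *output*
assert abs(numeric - analytic) < 1e-6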
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    'tensor(bool)': np.bool_,
    'tensor(int8)': np.int8,
    'tensor(uint8)': np.uint8,
    'tensor(int16)': np.int16,
    'tensor(uint16)': np.uint16,
    'tensor(int32)': np.int32,
    'tensor(uint32)': np.uint32,
    'tensor(int64)': np.int64,
    'tensor(uint64)': np.uint64,
    'tensor(float16)': np.float16,
    'tensor(float)': np.float32,
    'tensor(double)': np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.')
        self.model = model
        self.model_save_dir = kwargs.get('model_save_dir', None)
        self.latest_model_name = kwargs.get('latest_model_name', ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        if provider is None:
            logger.info('No onnxruntime provider specified, using CPUExecutionProvider')
            provider = 'CPUExecutionProvider'
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id,
        use_auth_token=None,
        revision=None,
        force_download=False,
        cache_dir=None,
        file_name=None,
        provider=None,
        sess_options=None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs['model_save_dir'] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs['model_save_dir'] = Path(model_cache_path).parent
            kwargs['latest_model_name'] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id,
        force_download=True,
        use_auth_token=None,
        cache_dir=None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split('@')) == 2:
            model_id, revision = model_id.split('@')
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
| 29 | 0 |
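# --- Illustrative sketch (added) -------------------------------------------------
# The wrapper above ultimately delegates to onnxruntime; the core call pattern it
# hides is just the following (the model path and input shape are illustrative
# assumptions, not part of the original code):
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])
input_name = session.get_inputs()[0].name
dummy = np.zeros((1, 3, 224, 224), dtype=np.float32)
outputs = session.run(None, {input_name: dummy})  # None -> return all outputs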
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
a = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( _snake_case ):
def __init__( self : Any , *lowerCAmelCase : Tuple , **lowerCAmelCase : str ):
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""" , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
| 155 |
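# --- Illustrative sketch (added) -------------------------------------------------
# The class above is the standard deprecation-shim pattern: subclass the
# replacement and emit a FutureWarning on construction. A generic, self-contained
# version of the same idea (names are hypothetical):
import warnings


class NewProcessor:
    def __init__(self, size=224):
        self.size = size


class OldProcessor(NewProcessor):  # kept only for backward compatibility
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)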
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session')
def dataset():
    n = 10
    features = datasets.Features(
        {
            'tokens': datasets.Sequence(datasets.Value('string')),
            'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'])),
            'answers': datasets.Sequence(
                {
                    'text': datasets.Value('string'),
                    'answer_start': datasets.Value('int32'),
                }
            ),
            'id': datasets.Value('int64'),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            'tokens': [['foo'] * 5] * n,
            'labels': [[1] * 5] * n,
            'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
            'id': list(range(n)),
        },
        features=features,
    )
    return dataset


@pytest.fixture(scope='session')
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp('data') / 'file.arrow')
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
FILE_CONTENT = '\\n Text data.\n Second line of data.'


@pytest.fixture(scope='session')
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp('data') / 'file.txt'
    data = FILE_CONTENT
    with open(filename, 'w') as f:
        f.write(data)
    return filename


@pytest.fixture(scope='session')
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp('data') / 'file.txt.bz2'
    data = bytes(FILE_CONTENT, 'utf-8')
    with bz2.open(path, 'wb') as f:
        f.write(data)
    return path


@pytest.fixture(scope='session')
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp('data') / 'file.txt.gz')
    data = bytes(FILE_CONTENT, 'utf-8')
    with gzip.open(path, 'wb') as f:
        f.write(data)
    return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
import lza.frame
UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' )
with lza.frame.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : List[Any] ):
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with pyazr.SevenZipFile(__snake_case , 'w' ) as archive:
archive.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] , __snake_case : List[Any] ):
'''simple docstring'''
import tarfile
UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
import lzma
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' )
with lzma.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[int] , __snake_case : Optional[Any] ):
'''simple docstring'''
import zipfile
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
UpperCAmelCase_ : Tuple = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
UpperCAmelCase_ : List[str] = bytes(__snake_case , 'utf-8' )
with zstd.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.xml'
UpperCAmelCase_ : List[Any] = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(__snake_case , 'w' ) as f:
f.write(__snake_case )
return filename
DATA = [
    {'col_1': '0', 'col_2': 0, 'col_3': 0.0},
    {'col_1': '1', 'col_2': 1, 'col_3': 1.0},
    {'col_1': '2', 'col_2': 2, 'col_3': 2.0},
    {'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
DATA2 = [
    {'col_1': '4', 'col_2': 4, 'col_3': 4.0},
    {'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
DATA_DICT_OF_LISTS = {
    'col_1': ['0', '1', '2', '3'],
    'col_2': [0, 1, 2, 3],
    'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {'col_3': 0.0, 'col_1': '0', 'col_2': 0},
    {'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
    {'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
    {'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
    {'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
    {'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]


@pytest.fixture(scope='session')
def dataset_dict():
    return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session')
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp('data') / 'dataset.arrow')
    dataset.map(cache_file_name=path)
    return path


@pytest.fixture(scope='session')
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.sqlite')
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)')
        for item in DATA:
            cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)', tuple(item.values()))
        con.commit()
    return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(__snake_case , 'w' , newline='' ) as f:
UpperCAmelCase_ : Tuple = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(__snake_case , 'w' , newline='' ) as f:
UpperCAmelCase_ : Optional[Any] = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any ):
'''simple docstring'''
import bza
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(__snake_case , 'rb' ) as f:
UpperCAmelCase_ : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] , __snake_case : Tuple , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(__snake_case , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : int , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
UpperCAmelCase_ : Dict = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.intaa(),
'col_3': pa.floataa(),
} )
with open(__snake_case , 'wb' ) as f:
UpperCAmelCase_ : List[Any] = pq.ParquetWriter(__snake_case , schema=__snake_case )
UpperCAmelCase_ : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__snake_case ) )] for k in DATA[0]} , schema=__snake_case )
writer.write_table(__snake_case )
writer.close()
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase_ : Optional[int] = {'data': DATA}
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase_ : Tuple = {'data': DATA_DICT_OF_LISTS}
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Dict ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(__snake_case , 'rb' ) as orig_file:
with gzip.open(__snake_case , 'wb' ) as zipped_file:
zipped_file.writelines(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int , __snake_case : Any ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(__snake_case , 'rb' ) as orig_file:
with gzip.open(__snake_case , 'wb' ) as zipped_file:
zipped_file.writelines(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : str , __snake_case : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : str , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any , __snake_case : Any , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = ['0', '1', '2', '3']
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = ['0', '1', '2', '3']
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ['0', '1', '2', '3']
UpperCAmelCase_ : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : str , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename('unsupported.ext' ) )
f.write(__snake_case , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(__snake_case , 'w' , encoding='utf-8' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def image_file():
    return os.path.join('tests', 'features', 'data', 'test_image_rgb.jpg')


@pytest.fixture(scope='session')
def audio_file():
    return os.path.join('tests', 'features', 'data', 'test_audio_44100.wav')


@pytest.fixture(scope='session')
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.img.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace('.jpg', '2.jpg'))
    return path
@pytest.fixture(scope='session' )
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp('data_dir')

    (data_dir / "subdir").mkdir()
    with open(data_dir / 'subdir' / 'train.txt', 'w') as f:
        f.write('foo\n' * 10)
    with open(data_dir / 'subdir' / 'test.txt', 'w') as f:
        f.write('bar\n' * 10)
    # hidden file
    with open(data_dir / 'subdir' / '.test.txt', 'w') as f:
        f.write('bar\n' * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / '.subdir' / 'train.txt', 'w') as f:
        f.write('foo\n' * 10)
    with open(data_dir / '.subdir' / 'test.txt', 'w') as f:
        f.write('bar\n' * 10)
    return data_dir
| 29 | 0 |
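# --- Illustrative sketch (added) ------------------------------------------------
# Fixtures like the ones above are consumed by name in test signatures. A minimal,
# self-contained example of the pattern (test name and file contents are
# illustrative):
import pytest


@pytest.fixture(scope="session")
def sample_text_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "sample.txt"
    path.write_text("Text data.\nSecond line of data.")
    return str(path)


def test_reads_fixture_file(sample_text_path):
    with open(sample_text_path) as f:
        assert f.readline().startswith("Text data")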
"""simple docstring"""
def _snake_case ( snake_case__ : int , snake_case__ : int , snake_case__ : int ):
A = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def _snake_case ( ):
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 74 |
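# --- Quick cross-check (added) ----------------------------------------------------
# The closed form n/2 * (2a + (n - 1)d) used above can be validated against an
# explicit summation of the terms:
def sum_of_series(first_term, common_diff, num_of_terms):
    return (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)


first, diff, n = 1, 1, 10
explicit = sum(first + i * diff for i in range(n))
assert sum_of_series(first, diff, n) == explicit == 55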
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return all board positions reachable from `position` by a knight's move."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """A tour is complete when every square has been visited (no zeros left)."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Depth-first search that tries to extend the tour from `pos`."""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0  # backtrack
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board, trying every starting square."""
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 0 |
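# --- Companion checker (added) ------------------------------------------------------
# A lightweight sketch (names are mine, not from the snippet) that verifies a filled
# board really encodes a knight's tour: consecutive numbers must be a knight's move
# apart and cover every square exactly once.
def is_valid_tour(board):
    n = len(board)
    pos = {board[y][x]: (y, x) for y in range(n) for x in range(n)}
    if sorted(pos) != list(range(1, n * n + 1)):
        return False
    return all(
        sorted((abs(pos[k][0] - pos[k + 1][0]), abs(pos[k][1] - pos[k + 1][1]))) == [1, 2]
        for k in range(1, n * n)
    )


assert not is_valid_tour([[1, 2], [3, 4]])  # a 2x2 board admits no knight moves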
from math import sqrt
def solution(limit: int = 1000000) -> int:
    """Return the least M such that the number of cuboids (with sides up to M)
    whose shortest surface route is an integer first exceeds `limit`."""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
| 142 |
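# --- Brute-force cross-check (added) -------------------------------------------------
# The counting trick above relies on the shortest surface route across an a x b x c
# cuboid (a <= b <= c) having length sqrt((a + b)**2 + c**2). A direct enumeration
# over small cuboids confirms the Project Euler 86 statement that M = 100 yields
# exactly 2060 integer-route cuboids:
from math import sqrt


def count_integer_cuboids(max_side):
    count = 0
    for c in range(1, max_side + 1):
        for b in range(1, c + 1):
            for a in range(1, b + 1):
                if sqrt((a + b) ** 2 + c**2).is_integer():
                    count += 1
    return count


assert count_integer_cuboids(100) == 2060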
def partition(m: int) -> int:
    """Return p(m), the number of integer partitions of m, via dynamic programming."""
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
__UpperCAmelCase = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
__UpperCAmelCase = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 29 | 0 |
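# --- Quick check (added) ---------------------------------------------------------------
# The DP above computes the integer partition function p(m); the first values of
# OEIS A000041 make a compact sanity check (self-contained copy of the cleaned-up
# function above):
def partition(m):
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]


for m, p in {1: 1, 2: 2, 3: 3, 4: 5, 5: 7, 6: 11, 7: 15}.items():
    assert partition(m) == p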
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin of an angle in degrees with a truncated Maclaurin series."""
    # Simplify the angle to be between 0 and 360 degrees
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__("doctest").testmod()
| 5 |
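# --- Accuracy check (added) --------------------------------------------------------------
# Comparing the truncated Maclaurin series against math.sin on a few angles
# (self-contained copy of the cleaned-up function above, renamed to avoid shadowing):
import math
from math import factorial, radians


def maclaurin_sin(angle_in_degrees, accuracy=18, rounded_values_count=10):
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b
        a += 2
    return round(result, rounded_values_count)


for deg in (0, 30, 45, 90, 180, 270, -60):
    assert abs(maclaurin_sin(deg) - math.sin(math.radians(deg))) < 1e-8, deg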
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params['padding'] = padding
        if truncation is not None:
            preprocess_params['truncation'] = truncation
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {'image': image, 'question': question}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs['image'])
        model_inputs = self.tokenizer(
            inputs['question'], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 29 | 0 |
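# --- Illustrative usage (added) --------------------------------------------------------
# Typical use of the pipeline class above via the high-level factory. The checkpoint
# name is a common public VQA model, given here as an example rather than a
# requirement; weights are downloaded on first use.
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
preds = vqa(image="path/to/photo.jpg", question="What color is the car?", top_k=3)
for p in preds:
    print(f"{p['answer']}: {p['score']:.3f}")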
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    """Decorator that skips a test unconditionally."""
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    """Decorator marking a test as slow; skipped unless RUN_SLOW=1."""
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    """Test case with a shared temp dir, cleared between tests by default."""

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    return result
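# Usage sketch (assumes `sys.executable` points at a working Python interpreter):
#
#     result = execute_subprocess_async([sys.executable, "-c", "print('ok')"])
#     assert result.returncode == 0 and result.stdout[0] == "ok"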
class SubprocessCallException(Exception):
    pass
def run_command(command, return_stdout=False):
    """
    Runs `command` with `subprocess.check_output` and, if `return_stdout` is True, returns the
    decoded stdout; raises a `SubprocessCallException` with the captured output on failure.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
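# Usage sketch (assumes a POSIX `echo` binary on PATH; the command is illustrative):
#
#     stdout = run_command(["echo", "hello"], return_stdout=True)
#     assert stdout.strip() == "hello"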
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    """
    Counts how many words in words.txt have a letter-value sum (A=1, ..., Z=26)
    equal to a triangular number.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
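    # Sanity check of the letter-value rule used above (illustrative, not part of the
    # Project Euler answer): "SKY" -> 19 + 11 + 25 = 55, the 10th triangular number.
    assert sum(ord(letter) - 64 for letter in "SKY") == 55
    assert 55 in TRIANGULAR_NUMBERS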
"""The SuperGLUE benchmark metric."""

from sklearn.metrics import f1_score, matthews_corrcoef

import datasets

from .record_evaluation import evaluate as evaluate_record


_CITATION = """\
@article{wang2019superglue,
  title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
  author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
  journal={arXiv preprint arXiv:1905.00537},
  year={2019}
}
"""

_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""

_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGlUE subset:
        - for 'record': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'prediction_text': the predicted answer text
        - for 'multirc': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question-answer pair as specified by the dataset
            - 'prediction': the predicted answer label
        - otherwise: list of predicted labels
    references: list of reference labels. Depending on the SuperGLUE subset:
        - for 'record': list of question-answers dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'answers': list of possible answers
        - otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
    - for 'record':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1': F1 score
    - for 'multirc':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1_m': Per-question macro-F1 score
        - 'f1_a': Average F1 score over all answers
    - for 'axb':
        'matthews_correlation': Matthew Correlation
    - for 'cb':
        - 'accuracy': Accuracy
        - 'f1': F1 score
    - for all others:
        - 'accuracy': Accuracy
Examples:

    >>> super_glue_metric = datasets.load_metric('super_glue', 'copa')  # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'record')
    >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
    >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
    >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())
def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds, labels):
    """
    Computes F1 score and Exact Match for MultiRC predictions: per-question macro-F1 is
    averaged over questions, while exact match requires every answer candidate of a
    question to be classified correctly.
    """
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)

    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )
    def _get_feature_types(self):
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem


_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strips the protocol prefix (e.g. "s3://") from `dataset_path`, if present."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Returns True if `fs` is anything other than the local filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
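# Usage sketch for the helpers above ("memory://" is a built-in fsspec filesystem;
# the URI is illustrative):
#
#     fs = fsspec.filesystem("memory")
#     is_remote_filesystem(fs)                         # True: protocol is "memory", not "file"
#     extract_path_from_uri("s3://bucket/my_dataset")  # -> "bucket/my_dataset"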
def _reset_fsspec_lock() -> None:
    """
    Clears fsspec's internal IO thread, event loop and lock references, so that a forked
    process starts with fresh ones instead of handles inherited from the parent.
    """
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
import json
import os
import unittest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )
    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
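# Conventional unittest entry point so this file can be run directly; the upstream
# test suite is normally driven by pytest, so this is an optional convenience.
if __name__ == "__main__":
    unittest.main()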
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Sorts a list in place using bidirectional bubble sort ("cocktail shaker" sort):
    each pass bubbles the largest remaining item to the right, then the smallest
    to the left, stopping early once a full pass makes no swaps.
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted
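# Quick self-checks of the implementation above (small illustrative cases, not exhaustive):
assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]
assert cocktail_shaker_sort([]) == []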
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")