| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 87 to 55.2k) | int64 (0 to 349) | string (lengths 135 to 49.1k) | int64 (0 to 349) | int64 (0 or 1) |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
UpperCAmelCase__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
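# Illustrative sketch (hypothetical session, assuming the aliases above): the
# assignments keep the deprecated access paths pointing at the new objects, so
# old import sites keep working:
#
#   import datasets
#   assert datasets.arrow_dataset.concatenate_datasets is datasets.concatenate_datasets
#   assert datasets.utils.download_manager.DownloadManager is datasets.DownloadManager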
| 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=pndm,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 12 | 0 |
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """
    Iterate through the list from both ends to find the index of key using recursion.
    :param list_data: the list to be searched
    :param key: the key to be searched
    :param left: the index of the first element
    :param right: the index of the last element
    :return: the index of key if found, -1 otherwise

    >>> search(list(range(0, 11)), 5)
    5
    >>> search([1, 2, 4, 5, 3], 6)
    -1
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
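# Illustrative note (assumes the function above): each recursive call inspects
# one element at each end, so the search is O(n) time with O(n) call-stack
# depth; CPython's default recursion limit (about 1000 frames) therefore caps
# usable input size at roughly 2000 elements.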
| 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_mluke'] = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 12 | 0 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """
    Output class for [`ShapEImg2ImgPipeline`]: a batch of rendered frames per generated latent.
    """

    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)
        image = image.to(dtype=self.image_encoder.dtype, device=device)
        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])
        return image_embeds

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)
        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)
        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)
        images = torch.stack(images)
        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")
        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]
        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=images)
| 2 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)
ORT_TO_NP_TYPE = {
    'tensor(bool)': np.bool_,
    'tensor(int8)': np.int8,
    'tensor(uint8)': np.uint8,
    'tensor(int16)': np.int16,
    'tensor(uint16)': np.uint16,
    'tensor(int32)': np.int32,
    'tensor(uint32)': np.uint32,
    'tensor(int64)': np.int64,
    'tensor(uint64)': np.uint64,
    'tensor(float16)': np.float16,
    'tensor(float)': np.float32,
    'tensor(double)': np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
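# Illustrative usage sketch (hypothetical repo id and input name):
#
#   model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-unet")
#   outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))
#
# `__call__` converts every keyword argument to a NumPy array and feeds the
# whole mapping to the underlying `onnxruntime.InferenceSession.run`.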
| 12 | 0 |
from collections import deque


def tarjan(g):
    """
    Tarjan's algorithm for finding the strongly connected components of a
    directed graph, given as an adjacency list. Returns the list of components.
    """
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components


def create_graph(n, edges):
    """Build an adjacency list for ``n`` vertices from ``(u, v)`` edge pairs."""
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
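    # Illustrative follow-up check (assumes the test graph above): Tarjan emits
    # components in reverse topological order of the condensation graph, so the
    # sink component [5] comes first and the source component [3, 2, 1, 0] last.
    components = tarjan(g)
    assert components[0] == [5] and components[-1] == [3, 2, 1, 0]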
| 3 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_vision_encoder_decoder'] = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_vision_encoder_decoder'] = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_vision_encoder_decoder'] = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 12 | 0 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 4 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly opytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
| 12 | 0 |
import math
import qiskit
def quantum_full_adder(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    """
    Build and simulate a quantum full adder, returning the measurement counts
    for the sum and carry-out bits. An input of 2 puts the corresponding qubit
    into superposition via a Hadamard gate.
    """
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")
    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits
    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
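    # Illustrative note (assumes the circuit above): the classical inputs
    # (1, 1, 1) prepare |1> on each input qubit, so the run is deterministic:
    # sum = 1 and carry_out = 1, and all shots land on the bitstring "11",
    # i.e. counts like {'11': 1000}.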
| 5 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 12 | 0 |
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 6 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
UpperCAmelCase_ = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
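# Illustrative worked example (hypothetical values, assuming scale_factor=8):
# each dimension is rounded up to a multiple of 8**2 = 64 and then divided by 8.
#
#   downscale_height_and_width(768, 768)  # -> (96, 96), since 768 = 12 * 64 and 12 * 8 = 96
#   downscale_height_and_width(700, 700)  # -> (88, 88), since ceil(700 / 64) = 11 and 11 * 8 = 88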
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Any , UpperCamelCase_: Tuple , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: int ):
if latents is None:
__lowerCamelCase = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
__lowerCamelCase = latents.to(UpperCamelCase_ )
__lowerCamelCase = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
__lowerCamelCase = torch.device(F'cuda:{gpu_id}' )
__lowerCamelCase = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[int]=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
__lowerCamelCase = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=UpperCamelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__lowerCamelCase = None
for cpu_offloaded_model in [self.unet, self.movq]:
__lowerCamelCase, __lowerCamelCase = cpu_offload_with_hook(UpperCamelCase_ , UpperCamelCase_ , prev_module_hook=UpperCamelCase_ )
# We'll offload the last model manually.
__lowerCamelCase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self: int ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase_ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(UpperCamelCase_ )
def __call__( self: Tuple , UpperCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: int = 5_12 , UpperCamelCase_: int = 5_12 , UpperCamelCase_: int = 1_00 , UpperCamelCase_: float = 4.0 , UpperCamelCase_: int = 1 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ):
__lowerCamelCase = self._execution_device
__lowerCamelCase = guidance_scale > 1.0
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 )
__lowerCamelCase = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
__lowerCamelCase = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
__lowerCamelCase = negative_image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
__lowerCamelCase = hint.repeat_interleave(UpperCamelCase_ , dim=0 )
__lowerCamelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ )
__lowerCamelCase = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ )
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
__lowerCamelCase = self.scheduler.timesteps
__lowerCamelCase = self.movq.config.latent_channels
__lowerCamelCase, __lowerCamelCase = downscale_height_and_width(UpperCamelCase_ , UpperCamelCase_ , self.movq_scale_factor )
# create initial latent
__lowerCamelCase = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
__lowerCamelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCamelCase = {"""image_embeds""": image_embeds, """hint""": hint}
__lowerCamelCase = self.unet(
sample=UpperCamelCase_ , timestep=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , added_cond_kwargs=UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0]
if do_classifier_free_guidance:
__lowerCamelCase, __lowerCamelCase = noise_pred.split(latents.shape[1] , dim=1 )
__lowerCamelCase, __lowerCamelCase = noise_pred.chunk(2 )
__lowerCamelCase, __lowerCamelCase = variance_pred.chunk(2 )
__lowerCamelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__lowerCamelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__lowerCamelCase, __lowerCamelCase = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase = self.scheduler.step(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ , )[0]
# post-processing
__lowerCamelCase = self.movq.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
__lowerCamelCase = image * 0.5 + 0.5
__lowerCamelCase = image.clamp(0 , 1 )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowerCamelCase = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
| 12 | 0 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    """Pick a single answer annotation for an example, preferring short answers."""

    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])
    return answer
def get_context_and_ans(example, assertion=False):
    """Gives new context after removing <html> & new answer tokens as per new context"""
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, helps in removing all samples with no answer
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break
        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(inputs),
                "end_token": [-100] * len(inputs),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["start_token"]]), add_special_tokens=False).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example, tokenizer, doc_stride=doc_stride, max_length=max_length, assertion=assertion, )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"], labels["start_token"], labels["end_token"], labels["category"], ):
                if start == -1 and end == -1:
                    continue  # skip samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop ~60% of the "null" samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    } )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    data = data["train" if PROCESS_TRAIN == "true" else "validation"]
    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)
    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
save_to_disk(data, file_name=cache_file_name)
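
# --- Illustrative sketch (added; `_window_starts` is a hypothetical helper) ---
# The strided windowing above is driven by
# `range(q_len, len(input_ids), max_length - doc_stride)`: every window keeps the
# question prefix, and each next window starts `max_length - doc_stride` tokens
# after the previous one.
def _window_starts(q_len: int, total_len: int, max_length: int, doc_stride: int) -> list:
    return list(range(q_len, total_len, max_length - doc_stride))


# e.g. _window_starts(q_len=4, total_len=20, max_length=10, doc_stride=4) == [4, 10, 16]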
| 7 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__( self, parent, batch_size=2, seq_length=56, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=7, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=2, num_random_blocks=3, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, attention_type=self.attention_type, block_size=self.block_size, num_random_blocks=self.num_random_blocks, use_bias=self.use_bias, rescale_embeddings=self.rescale_embeddings, )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
super().test_hidden_states_output()
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""google/bigbird-roberta-base""" )
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("""JIT Enabled""" ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs) )
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape )
    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1E-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("""outputs.attentions""" ):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
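
# --- Illustrative sketch (added; not part of the test suite) ---
# The jitted-vs-eager comparison used above, reduced to a standalone pattern:
#
#     @jax.jit
#     def fn(x):
#         return (2.0 * x).sum()
#
#     with jax.disable_jit():
#         eager = fn(jnp.arange(4.0))
#     assert jnp.allclose(fn(jnp.arange(4.0)), eager)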
| 12 | 0 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}


class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Fast byte-level BPE tokenizer for Blenderbot-90M, backed by the `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__( self, vocab_file=None, merges_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, trim_offsets=True, **kwargs, ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, ), bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs, )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
| 8 |
def odd_even_transposition(arr: list) -> list:
    """Sort `arr` in place with odd-even transposition (brick) sort and return it."""
    arr_size = len(arr)
    for pass_num in range(arr_size):
        # alternate between even- and odd-indexed neighbour pairs each pass
        for i in range(pass_num % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
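
# Quick sanity check (an added test sketch, not part of the original script):
if __name__ == "__main__":
    import random

    sample = [random.randint(-100, 100) for _ in range(50)]
    assert odd_even_transposition(sample[:]) == sorted(sample)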
| 12 | 0 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 9 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(F'Provided path ({save_directory}) should be a directory, not a file' )
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, """question_encoder_tokenizer""" )
        generator_path = os.path.join(save_directory, """generator_tokenizer""" )
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("""config""", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="""question_encoder_tokenizer""" )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="""generator_tokenizer""" )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch( self, src_texts: List[str], tgt_texts: Optional[List[str]] = None, max_length: Optional[int] = None, max_target_length: Optional[int] = None, padding: str = "longest", return_tensors: str = None, truncation: bool = True, **kwargs, ) -> BatchEncoding:
        warnings.warn(
            """`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
            """regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
            """context manager to prepare your targets. See the documentation of your specific tokenizer for more """
            """details""", FutureWarning, )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs, )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=truncation, **kwargs, )
        model_inputs["""labels"""] = labels["""input_ids"""]
        return model_inputs
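
# --- Illustrative usage sketch (added; not part of the module) ---
# Assuming this matches the public `transformers.RagTokenizer` API and the real
# "facebook/rag-token-nq" checkpoint:
#
#     from transformers import RagTokenizer
#
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#     batch = tokenizer(["who wrote hamlet?"], return_tensors="pt")
#     print(batch["input_ids"].shape)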
| 12 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"MBartForCausalLM",
"MBartForConditionalGeneration",
"MBartForQuestionAnswering",
"MBartForSequenceClassification",
"MBartModel",
"MBartPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"TFMBartForConditionalGeneration",
"TFMBartModel",
"TFMBartPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"FlaxMBartForConditionalGeneration",
"FlaxMBartForQuestionAnswering",
"FlaxMBartForSequenceClassification",
"FlaxMBartModel",
"FlaxMBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
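
# --- Note on the pattern above (an added sketch, not part of the module) ---
# `_LazyModule` defers the heavy framework imports until an attribute is first
# accessed. The same idea with only the standard library:
import importlib


class _LazyAttrSketch:
    """Minimal stand-in: imports `module_name` on first attribute access."""

    def __init__(self, module_name: str):
        self._module_name = module_name
        self._module = None

    def __getattr__(self, name):
        if self._module is None:  # the import happens here, not at definition time
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, name)


# e.g. _LazyAttrSketch("json").dumps({"a": 1}) -> '{"a": 1}'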
| 10 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
UpperCAmelCase_ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
UpperCAmelCase_ = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
UpperCAmelCase_ = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': f"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'emoji': True,
},
}
]
total_num_failed = 0
for log in Path().glob('*.log'):
    section_num_failed = 0
    with open(log, 'r') as f:
        for line in f:
            line = json.loads(line)
            if line.get('nodeid', '') != "":
                test = line['nodeid']
                if line.get('duration', None) is not None:
                    duration = f"""{line["duration"]:.4f}"""
                    if line.get('outcome', '') == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split('_')[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
message = ''
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split('::')
                data[0] = data[0].split('/')[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
                table,
                headers=['Test Location', 'Num Failed'],
                tablefmt=hf_table_format,
                stralign='right',
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3_000:
        err = 'Too many failed tests, please see the full report in the Action results.'
        offset = len(err) + 10
        message = message[: 3_000 - offset] + f"""\n...\n```\n{err}"""
print(f"""### {message}""")
else:
    message = 'No failed tests! 🤗'
print(f"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
        md_report = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
        action_button = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': f"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
        date_report = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': f"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
    response = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
    ts = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
            test_class = ''
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ''
            payload = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
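
# --- Illustrative sketch (added; not part of the report logic) ---
# `hf_table_format` renders Slack/GitHub-friendly pipe tables: a header row and
# data rows delimited by `|`, with no rule lines, e.g.:
#
#     tabulate(
#         [["tests/test_a.py", 2]],
#         headers=["Test Location", "Num Failed"],
#         tablefmt=hf_table_format,
#     )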
| 12 | 0 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max(len(t) for t in text)

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
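
# --- Illustrative usage sketch (added; not taken from this file) ---
# Assuming this matches the public `transformers.OwlViTProcessor` API and the
# real "google/owlvit-base-patch32" checkpoint:
#
#     from transformers import OwlViTProcessor
#     from PIL import Image
#     import numpy as np
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     image = Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))
#     inputs = processor(text=[["a cat", "a dog"]], images=image, return_tensors="pt")
#     print(inputs["input_ids"].shape, inputs["pixel_values"].shape)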
| 11 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
@register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler
    def __init__( self, vqvae: VQModel, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, transformer: Transformer2DModel, scheduler: VQDiffusionScheduler, learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings, ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
# get prompt text embeddings
__lowerCamelCase = self.tokenizer(
UpperCamelCase_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__lowerCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__lowerCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
__lowerCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
__lowerCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
__lowerCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase_ )
# duplicate text embeddings for each generation per prompt
__lowerCamelCase = prompt_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
__lowerCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
__lowerCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(UpperCamelCase_ , 1 , 1 )
else:
__lowerCamelCase = [""""""] * batch_size
__lowerCamelCase = text_input_ids.shape[-1]
__lowerCamelCase = self.tokenizer(
UpperCamelCase_ , padding="""max_length""" , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors="""pt""" , )
__lowerCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
__lowerCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowerCamelCase = negative_prompt_embeds.shape[1]
__lowerCamelCase = negative_prompt_embeds.repeat(1 , UpperCamelCase_ , 1 )
__lowerCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , UpperCamelCase_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
    def __call__( self, prompt: Union[str, List[str]], num_inference_steps: int = 1_00, guidance_scale: float = 5.0, truncation_rate: float = 1.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(prompt)}' )

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(UpperCamelCase_ )}.' )
# get the initial completely masked latents unless the user supplied it
__lowerCamelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
__lowerCamelCase = self.transformer.num_vector_embeds - 1
__lowerCamelCase = torch.full(UpperCamelCase_ , UpperCamelCase_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    """Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,"""
                    F' {self.transformer.num_vector_embeds - 1} (inclusive).' )
__lowerCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(UpperCamelCase_ , device=self.device )
__lowerCamelCase = self.scheduler.timesteps.to(self.device )
__lowerCamelCase = latents
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the sample if we are doing classifier free guidance
__lowerCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
__lowerCamelCase = self.transformer(UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , timestep=UpperCamelCase_ ).sample
if do_classifier_free_guidance:
__lowerCamelCase, __lowerCamelCase = model_output.chunk(2 )
__lowerCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(UpperCamelCase_ , dim=1 , keepdim=UpperCamelCase_ )
__lowerCamelCase = self.truncate(UpperCamelCase_ , UpperCamelCase_ )
# remove `log(0)`'s (`-inf`s)
__lowerCamelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase = self.scheduler.step(UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = self.vqvae.config.vq_embed_dim
__lowerCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
__lowerCamelCase = self.vqvae.quantize.get_codebook_entry(UpperCamelCase_ , shape=UpperCamelCase_ )
__lowerCamelCase = self.vqvae.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ ).sample
__lowerCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowerCamelCase = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: float ):
__lowerCamelCase, __lowerCamelCase = torch.sort(UpperCamelCase_ , 1 , descending=UpperCamelCase_ )
__lowerCamelCase = torch.exp(UpperCamelCase_ )
__lowerCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
__lowerCamelCase = torch.full_like(keep_mask[:, 0:1, :] , UpperCamelCase_ )
__lowerCamelCase = torch.cat((all_true, keep_mask) , dim=1 )
__lowerCamelCase = keep_mask[:, :-1, :]
__lowerCamelCase = keep_mask.gather(1 , indices.argsort(1 ) )
__lowerCamelCase = log_p_x_0.clone()
__lowerCamelCase = -torch.inf # -inf = log(0)
return rv
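
# --- Illustrative usage sketch (added; not taken from this file) ---
# This mirrors diffusers' public `VQDiffusionPipeline`; assuming that class and
# the real "microsoft/vq-diffusion-ithq" checkpoint:
#
#     from diffusers import VQDiffusionPipeline
#
#     pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#     image = pipe("teddy bear playing in the pool", num_inference_steps=50).images[0]
#     image.save("teddy.png")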
| 12 | 0 |
def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below `n` (Project Euler problem 10)."""
    # 0 marks "still possibly prime", 1 marks "composite" in this sieve.
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            # mark every multiple of the prime i as composite (step is i, not n)
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f'''{solution() = }''')
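
# Sanity check against a naive reference for small n (an added test sketch):
def _naive_prime_sum(n: int) -> int:
    def is_prime(k: int) -> bool:
        return k > 1 and all(k % d for d in range(2, int(k**0.5) + 1))

    return sum(k for k in range(n) if is_prime(k))


if __name__ == "__main__":
    assert solution(100) == _naive_prime_sum(100) == 1060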
| 13 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )

        text = tokenizer.encode("""sequence builders""", add_special_tokens=False)
        text_2 = tokenizer.encode("""multi-sequence build""", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 12 | 0 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Optional[Dataset] = None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[List[str]] = None , UpperCAmelCase__ : str = "eval" , **UpperCAmelCase__ : List[str] , ) ->Dict[str, float]:
A__ = gen_kwargs.copy()
A__ = (
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''') is not None else self.args.generation_max_length
)
A__ = (
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''') is not None else self.args.generation_num_beams
)
A__ = gen_kwargs
A__ = self.eval_dataset if eval_dataset is None else eval_dataset
A__ = self.get_eval_dataloader(UpperCAmelCase__)
A__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
A__ = self.compute_metrics
A__ = None
A__ = time.time()
A__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
A__ = eval_loop(
UpperCAmelCase__ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase__ , metric_key_prefix=UpperCAmelCase__ , )
finally:
A__ = compute_metrics
A__ = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
UpperCAmelCase__ , UpperCAmelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
A__ = self.post_process_function(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
A__ = self.compute_metrics(UpperCAmelCase__)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"""{metric_key_prefix}_"""):
A__ = metrics.pop(UpperCAmelCase__)
metrics.update(output.metrics)
else:
A__ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(UpperCAmelCase__)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
A__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCAmelCase__)
return metrics
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : str = "test" , **UpperCAmelCase__ : int) ->int:
A__ = gen_kwargs.copy()
A__ = self.get_test_dataloader(UpperCAmelCase__)
# Temporarily disable metric computation, we will do it in the loop here.
A__ = self.compute_metrics
A__ = None
A__ = time.time()
A__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
A__ = eval_loop(
UpperCAmelCase__ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase__ , metric_key_prefix=UpperCAmelCase__ , )
finally:
A__ = compute_metrics
A__ = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
UpperCAmelCase__ , UpperCAmelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is None or self.compute_metrics is None:
return output
A__ = self.post_process_function(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , '''predict''')
A__ = self.compute_metrics(UpperCAmelCase__)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"""{metric_key_prefix}_"""):
A__ = metrics.pop(UpperCAmelCase__)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCAmelCase__)
| 14 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCAmelCase_ = 16
UpperCAmelCase_ = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name_or_path: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("""glue""", """mrpc""" )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["""idx""", """sentence1""", """sentence2"""], load_from_cache_file=False)

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""", """labels""" )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="""max_length""", max_length=128, return_tensors="""pt""" )
        return tokenizer.pad(examples, padding="""longest""", return_tensors="""pt""" )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""], shuffle=False, collate_fn=collate_fn, batch_size=batch_size )
    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device )
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["""labels"""]) )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Optional[int] ):
__lowerCamelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCamelCase = config["""lr"""]
__lowerCamelCase = int(config["""num_epochs"""] )
__lowerCamelCase = int(config["""seed"""] )
__lowerCamelCase = int(config["""batch_size"""] )
__lowerCamelCase = args.model_name_or_path
set_seed(A__ )
__lowerCamelCase, __lowerCamelCase = get_dataloaders(A__ , A__ , A__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(A__ , return_dict=A__ )
# Instantiate optimizer
__lowerCamelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__lowerCamelCase = optimizer_cls(params=model.parameters() , lr=A__ )
if accelerator.state.deepspeed_plugin is not None:
__lowerCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
__lowerCamelCase = 1
__lowerCamelCase = (len(A__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__lowerCamelCase = get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=0 , num_training_steps=A__ , )
else:
__lowerCamelCase = DummyScheduler(A__ , total_num_steps=A__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# We need to keep track of how many total steps we have iterated over
__lowerCamelCase = 0
# We also need to keep track of the stating epoch so files are named properly
__lowerCamelCase = 0
__lowerCamelCase = evaluate.load("""glue""" , """mrpc""" )
__lowerCamelCase = num_epochs
if args.partial_train_epoch is not None:
__lowerCamelCase = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__lowerCamelCase = args.resume_from_checkpoint.split("""epoch_""" )[1]
__lowerCamelCase = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__lowerCamelCase = int(A__ ) + 1
__lowerCamelCase = evaluation_loop(A__ , A__ , A__ , A__ )
accelerator.print("""resumed checkpoint performance:""" , A__ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , """r""" ) as f:
__lowerCamelCase = json.load(A__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__lowerCamelCase = {}
for epoch in range(A__ , A__ ):
model.train()
for step, batch in enumerate(A__ ):
__lowerCamelCase = model(**A__ )
__lowerCamelCase = outputs.loss
__lowerCamelCase = loss / gradient_accumulation_steps
accelerator.backward(A__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__lowerCamelCase = f'epoch_{epoch}'
__lowerCamelCase = os.path.join(args.output_dir , A__ )
accelerator.save_state(A__ )
__lowerCamelCase = evaluation_loop(A__ , A__ , A__ , A__ )
__lowerCamelCase = accuracy
__lowerCamelCase = lr_scheduler.get_lr()[0]
__lowerCamelCase = optimizer.param_groups[0]["""lr"""]
__lowerCamelCase = epoch
__lowerCamelCase = overall_step
accelerator.print(f'epoch {epoch}:' , A__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , """w""" ) as f:
json.dump(A__ , A__ )
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=A__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=A__ , )
parser.add_argument(
"""--output_dir""" , type=A__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=A__ , default=A__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=A__ , default=A__ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=A__ , default=2 , help="""Number of train epochs.""" , )
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(A__ , A__ )
if __name__ == "__main__":
main()
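# --- Usage sketch (illustrative; the script name and paths are placeholders, but
# the flags are the ones defined in main() above). Launched under accelerate with
# a DeepSpeed config, the Dummy* optimizer and scheduler act as stand-ins until
# `accelerator.prepare` hands control to DeepSpeed. A typical run-then-resume:
#
#   accelerate launch --use_deepspeed this_script.py --output_dir ./ckpts --num_epochs 2
#   accelerate launch --use_deepspeed this_script.py --output_dir ./ckpts \
#       --resume_from_checkpoint ./ckpts/epoch_0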
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
    "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
    "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}


class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
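# --- Usage sketch (illustrative, not part of the original module): composing a
# full OwlViTConfig from the two sub-configs defined above. The values shown are
# the class defaults; `from_text_vision_configs` expects plain dicts here.
#
#   text_config = OwlViTTextConfig(vocab_size=49408, hidden_size=512)
#   vision_config = OwlViTVisionConfig(image_size=768, patch_size=32)
#   config = OwlViTConfig.from_text_vision_configs(text_config.to_dict(), vision_config.to_dict())
#   assert config.model_type == "owlvit"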
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()

            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
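# --- Standalone sketch of the registration pattern exercised above (hedged:
# CustomConfig / CustomFeatureExtractor stand in for any user-defined pair, as in
# the test fixtures). Once registered, the auto-API resolves the custom class
# from the config's model_type exactly like a built-in one:
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
#   extractor = AutoFeatureExtractor.from_pretrained("path/to/custom/checkpoint")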
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)


def load_and_quantize_model(
    model: torch.nn.Module,
    bnb_quantization_config: BnbQuantizationConfig,
    weights_location: Union[str, os.PathLike] = None,
    device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
    no_split_module_classes: Optional[List[str]] = None,
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    offload_folder: Optional[Union[str, os.PathLike]] = None,
    offload_state_dict: bool = False,
):
    """Quantize a model's linear layers with bitsandbytes, then load and dispatch its weights."""
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model

    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )

    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)


def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map


def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model


def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced


def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names


def has_4bit_bnb_layers(model):
    """Check whether the model contains any `bnb.nn.Linear4bit` layers."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter: nn.Module):
    return next(parameter.parameters()).device


def quantize_and_offload(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
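# --- Usage sketch (illustrative; mirrors accelerate's public wrappers around the
# helpers above — `build_my_model` and the checkpoint path are placeholders):
#
#   from accelerate import init_empty_weights
#   from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
#   with init_empty_weights():
#       empty_model = build_my_model()  # hypothetical constructor, runs on the meta device
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#   quantized_model = load_and_quantize_model(
#       empty_model,
#       bnb_quantization_config=bnb_config,
#       weights_location="path/to/checkpoint",
#       device_map="auto",
#   )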
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
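# --- Usage sketch (illustrative; the dataset name and URL are placeholders): a
# dataset test typically builds a MockDownloadManager and passes it where a real
# DownloadManager would go, so every URL resolves into the local dummy_data.zip:
#
#   dl_manager = MockDownloadManager("squad", None, "1.0.0", use_local_dummy_data=True)
#   files = dl_manager.download_and_extract({"train": "https://example.com/train.json"})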
"""simple docstring"""
from __future__ import annotations
def _A ( UpperCamelCase_ : list[int]) -> int:
'''simple docstring'''
if not nums:
return 0
__lowercase = nums[0]
__lowercase = 0
for num in nums[1:]:
__lowercase ,__lowercase = (
max_excluding + num,
max(UpperCamelCase_, UpperCamelCase_),
)
return max(UpperCamelCase_, UpperCamelCase_)
if __name__ == "__main__":
import doctest
doctest.testmod()
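# Worked example (for illustration): maximum_non_adjacent_sum([2, 7, 9, 3, 1])
# returns 12, picking the non-adjacent elements 2 + 9 + 1.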
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Recursively place one queen per row, tracking column and diagonal collisions."""
    # Get next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that in the current board
        # (possible_board) the value does not already appear, because a repeat would
        # mean a vertical collision. Then we apply the two formulas we learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist in
        # their respective variables (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these are True it means there is a collision so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
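# For reference: the 4x4 board has exactly two solutions, so the call above
# prints both boards followed by "2 solutions were found."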
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
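# Note on the pattern above: at import time the module object is swapped for a
# _LazyModule, so e.g. `from transformers.models.ernie import ErnieModel` defers
# the heavy torch-dependent import until the attribute is first accessed.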
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves to make every node hold exactly one coin.

    >>> distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0)))
    2
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
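# Worked example (for illustration): in TreeNode(0, TreeNode(3), TreeNode(0)) the
# left child holds all three coins; two moves push two coins up to the root and a
# third move sends one on to the right child, so distribute_coins returns 3.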
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_DESCRIPTION = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_KWARGS_DESCRIPTION = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__( BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__( self , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 2_55 , do_pad: bool = True , pad_size: int = 8 , **kwargs , ):
        super().__init__(**kwargs )
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale( self , image: np.ndarray , scale: float , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def pad( self , image: np.ndarray , size: int , data_format: Optional[Union[str, ChannelDimension]] = None ):
        # Pad the bottom/right edges up to the next multiple of `size`.
        old_height, old_width = get_image_size(image )
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=data_format )
    def preprocess( self , images: ImageInput , do_rescale: Optional[bool] = None , rescale_factor: Optional[float] = None , do_pad: Optional[bool] = None , pad_size: Optional[int] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_pad:
            images = [self.pad(image , size=pad_size ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
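# Added sanity check (illustrative): the pad() rule above always rounds *up* to the
# next multiple of `size`, so even an exact multiple gains one full extra block.
if __name__ == "__main__":
    for old in (63, 64, 65):
        extra = (old // 8 + 1) * 8 - old
        assert (old + extra) % 8 == 0
        print(f"{old} -> {old + extra}")  # 63 -> 64, 64 -> 72, 65 -> 72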
| 12 | 0 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class __snake_case ( PreTrainedModel , BackboneMixin ):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig
    def __init__( self , config , **kwargs ):
        '''simple docstring'''
        requires_backends(self , """timm""" )
        super().__init__(config )
        self.config = config
        if config.backbone is None:
            raise ValueError("""backbone is not set in the config. Please set it to a timm model name.""" )
        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm." )
        if hasattr(config , """out_features""" ) and config.out_features is not None:
            raise ValueError("""out_features is not supported by TimmBackbone. Please use out_indices instead.""" )
        pretrained = getattr(config , """use_pretrained_backbone""" , None )
        if pretrained is None:
            raise ValueError("""use_pretrained_backbone is not set in the config. Please set it to True or False.""" )
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config , """out_indices""" , None ) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone , pretrained=pretrained , features_only=config.features_only , in_chans=config.num_channels , out_indices=out_indices , **kwargs , )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["""module"""]: str(i ) for i, layer in enumerate(self._backbone.feature_info.info )}
        super()._init_backbone(config )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , *model_args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ["""vision""", """timm"""] )
        from ...models.timm_backbone import TimmBackboneConfig
        config = kwargs.pop("""config""" , TimmBackboneConfig() )
        use_timm = kwargs.pop("""use_timm_backbone""" , True )
        if not use_timm:
            raise ValueError("""use_timm_backbone must be True for timm backbones""" )
        num_channels = kwargs.pop("""num_channels""" , config.num_channels )
        features_only = kwargs.pop("""features_only""" , config.features_only )
        use_pretrained_backbone = kwargs.pop("""use_pretrained_backbone""" , config.use_pretrained_backbone )
        out_indices = kwargs.pop("""out_indices""" , config.out_indices )
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path , num_channels=num_channels , features_only=features_only , use_pretrained_backbone=use_pretrained_backbone , out_indices=out_indices , )
        return super()._from_config(config , **kwargs )
    def _init_weights( self , module ):
        '''simple docstring'''
        pass
    def forward( self , pixel_values , output_attentions=None , output_hidden_states=None , return_dict=None , **kwargs ):
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("""Cannot output attentions for timm backbones at the moment""" )
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values , **kwargs )
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices )
        else:
            feature_maps = self._backbone(pixel_values , **kwargs )
            hidden_states = None
        feature_maps = tuple(feature_maps )
        hidden_states = tuple(hidden_states ) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps , hidden_states=hidden_states , attentions=None )
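# Hedged usage sketch (added): the backbone above is normally reached through the
# transformers Auto API; "resnet18" is an example timm checkpoint name and the
# exact Auto-API arguments are assumptions, not taken from this file.
if __name__ == "__main__":
    import torch
    from transformers import AutoBackbone

    backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, use_pretrained_backbone=False)
    feats = backbone(torch.randn(1, 3, 224, 224))
    print([fm.shape for fm in feats.feature_maps])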
| 20 |
from __future__ import annotations
def find_max( nums: list[int | float] , left: int , right: int ):
    '''simple docstring'''
    if len(nums ) == 0:
        raise ValueError("""find_max() arg is an empty sequence""" )
    if (
        left >= len(nums )
        or left < -len(nums )
        or right >= len(nums )
        or right < -len(nums )
    ):
        raise IndexError("""list index out of range""" )
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums , left , mid )  # find max in range[left, mid]
    right_max = find_max(nums , mid + 1 , right )  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
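    # Worked example (added): find_max([1, 5, 3, 2], 0, 3) splits at mid = 1 into
    # find_max(nums, 0, 1) -> 5 and find_max(nums, 2, 3) -> 3, then keeps 5.
    # Cost is O(n) comparisons with O(log n) recursion depth.
    assert find_max([1, 5, 3, 2], 0, 3) == 5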
| 12 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCamelCase( PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : int = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
_lowercase : Tuple = DDIMScheduler(
beta_start=0.0_0_0_8_5, beta_end=0.0_1_2, beta_schedule='scaled_linear', clip_sample=lowerCamelCase, set_alpha_to_one=lowerCamelCase, )
torch.manual_seed(0)
_lowercase : str = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
torch.manual_seed(0)
_lowercase : List[str] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=10_00, )
_lowercase : Dict = CLIPTextModel(lowerCamelCase)
_lowercase : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
_lowercase : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=0) -> Dict:
"""simple docstring"""
if str(lowerCamelCase).startswith('mps'):
_lowercase : str = torch.manual_seed(lowerCamelCase)
else:
_lowercase : List[Any] = torch.Generator(device=lowerCamelCase).manual_seed(lowerCamelCase)
_lowercase : List[Any] = {
'prompt': '.',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 1.0,
'sag_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Dict = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
_lowercase : Optional[Any] = sag_pipe.to(lowerCamelCase)
sag_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Tuple = '.'
_lowercase : int = torch.manual_seed(0)
_lowercase : Optional[Any] = sag_pipe(
[prompt], generator=lowerCamelCase, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type='np')
_lowercase : Tuple = output.images
_lowercase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Tuple = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-2
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : List[Any] = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
_lowercase : Dict = sag_pipe.to(lowerCamelCase)
sag_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Union[str, Any] = '.'
_lowercase : str = torch.manual_seed(0)
_lowercase : Any = sag_pipe(
[prompt], generator=lowerCamelCase, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type='np')
_lowercase : List[str] = output.images
_lowercase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : int = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-2
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : Optional[Any] = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
_lowercase : List[str] = sag_pipe.to(lowerCamelCase)
sag_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Union[str, Any] = '.'
_lowercase : Tuple = torch.manual_seed(0)
_lowercase : List[Any] = sag_pipe(
[prompt], width=7_68, height=5_12, generator=lowerCamelCase, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type='np', )
_lowercase : Tuple = output.images
assert image.shape == (1, 5_12, 7_68, 3)
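# Hedged usage sketch (added): outside the test harness the pipeline runs like any
# diffusers text-to-image pipeline; sag_scale controls the Self-Attention Guidance
# strength and 0.0 disables it. The checkpoint name matches the first test above.
if __name__ == "__main__":
    from diffusers import StableDiffusionSAGPipeline

    pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
    image = pipe("a photo of an astronaut", sag_scale=0.75, num_inference_steps=20).images[0]
    image.save("sag_example.png")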
| 21 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowerCamelCase__( TestCase):
    def setUp( self ):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = """pt"""
        self.framework_tf = """tf"""
    def _setup_pt_ckpt( self , save_dir ):
        model_pt = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(save_dir )
    def _setup_tf_ckpt( self , save_dir ):
        model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=True )
        model_tf.save_pretrained(save_dir )
    def test_framework_provided( self ):
        mock_framework = """mock_framework"""
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model , mock_framework )
        self.assertEqual(framework , mock_framework )
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
    def test_framework_from_local_checkpoint( self ):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt )
            self.assertEqual(framework , self.framework_pt )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt )
            self.assertEqual(framework , self.framework_tf )
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError ):
                FeaturesManager.determine_framework(local_invalid_ckpt )
    def test_framework_from_environment( self ):
        # PyTorch in environment, TensorFlow not -> use PyTorch
        mock_tf_available = MagicMock(return_value=False )
        with patch("""transformers.onnx.features.is_tf_available""" , mock_tf_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False )
        with patch("""transformers.onnx.features.is_torch_available""" , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_tf )
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True )
        mock_torch_available = MagicMock(return_value=True )
        with patch("""transformers.onnx.features.is_tf_available""" , mock_tf_available ), patch(
            """transformers.onnx.features.is_torch_available""" , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False )
        mock_torch_available = MagicMock(return_value=False )
        with patch("""transformers.onnx.features.is_tf_available""" , mock_tf_available ), patch(
            """transformers.onnx.features.is_torch_available""" , mock_torch_available ):
            with self.assertRaises(EnvironmentError ):
                FeaturesManager.determine_framework(self.test_model )
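# Hedged usage sketch (added): the resolution order exercised by the tests above is
# explicit framework argument > local checkpoint contents > installed libraries
# (preferring PyTorch when both frameworks are available).
if __name__ == "__main__":
    from transformers.onnx import FeaturesManager

    print(FeaturesManager.determine_framework("bert-base-uncased"))  # "pt" on a torch-only install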
| 12 | 0 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]
def main() -> None:
    '''simple docstring'''
    g = Github(os.environ["GITHUB_TOKEN"] )
    repo = g.get_repo("huggingface/accelerate" )
    open_issues = repo.get_issues(state="open" )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="closed" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
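# Operational note (added): this script is designed to run unattended, e.g. from a
# scheduled CI job, with a GITHUB_TOKEN exposed in the environment. The two branches
# above implement a two-stage policy: warn after 23 days of silence, then close
# 7 days after the bot's own warning. Example invocation (token is a placeholder):
#
#   GITHUB_TOKEN=<token> python stale.py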
| 22 |
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation( cells: list[list[int]] ):
    '''simple docstring'''
    next_generation = []
    for i in range(len(cells ) ):
        next_generation_row = []
        for j in range(len(cells[i] ) ):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i] ) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells ) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells ) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells ) - 1 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1 )
            else:
                next_generation_row.append(0 )
        next_generation.append(next_generation_row )
    return next_generation
def generate_images( cells: list[list[int]] , frames: int ):
    '''simple docstring'''
    images = []
    for _ in range(frames ):
        # Create output image
        img = Image.new("""RGB""" , (len(cells[0] ), len(cells )) )
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells ) ):
            for y in range(len(cells[0] ) ):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img )
        cells = new_generation(cells )
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
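    # Added cross-check (illustrative): the neighbour count above is equivalent to a
    # 2D convolution with an 8-connected kernel. Assumes scipy is installed.
    import numpy as np
    from scipy.signal import convolve2d

    _grid = np.array(GLIDER)
    _kernel = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
    _neighbours = convolve2d(_grid, _kernel, mode='same', boundary='fill')
    _stepped = ((_grid == 1) & ((_neighbours == 2) | (_neighbours == 3))) | ((_grid == 0) & (_neighbours == 3))
    assert _stepped.astype(int).tolist() == new_generation(GLIDER)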
| 12 | 0 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( task , reset_position_index_per_cell , tf_checkpoint_path , tapas_config_file , pytorch_dump_path ):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file )
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config )
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config )
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config )
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config )
    elif task == "MLM":
        model = TapasForMaskedLM(config=config )
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config )
    else:
        raise ValueError(f"""Task {task} not supported.""" )
    print(f"""Building PyTorch model from configuration: {config}""" )
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model , config , tf_checkpoint_path )
    # Save pytorch-model (weights and configuration)
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
    # Save tokenizer files
    print(f"""Save tokenizer files to {pytorch_dump_path}""" )
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + '''vocab.txt''' , model_max_length=512 )
    tokenizer.save_pretrained(pytorch_dump_path )
print('''Used relative position embeddings:''' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
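# Example invocation (added; all paths are placeholders):
#
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/tapas_config.json \
#       --pytorch_dump_path /path/to/output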
| 23 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__( PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
def lowerCAmelCase__ ( self: str ):
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase_ , )
__lowerCamelCase = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
torch.manual_seed(0 )
__lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
__lowerCamelCase = CLIPTextModel(UpperCamelCase_ )
__lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCamelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCamelCase = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert("""RGB""" ).resize((64, 64) )
__lowerCamelCase = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionInpaintPipeline(**UpperCamelCase_ )
__lowerCamelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
__lowerCamelCase = sd_pipe(**UpperCamelCase_ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self: int ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase_ , safety_checker=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type="""np""" , )
__lowerCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
UpperCamelCase_ , torch_dtype=torch.floataa , safety_checker=UpperCamelCase_ , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type="""np""" , )
__lowerCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def lowerCAmelCase__ ( self: int ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = PNDMScheduler.from_pretrained(UpperCamelCase_ , subfolder="""scheduler""" )
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
UpperCamelCase_ , safety_checker=UpperCamelCase_ , scheduler=UpperCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=2 , output_type="""np""" , )
__lowerCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
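# Hedged usage sketch (added): outside the test suite the pipeline takes a PIL init
# image plus a same-sized mask whose white pixels mark the region to repaint; file
# names here are placeholders.
if __name__ == "__main__":
    pipe = StableDiffusionInpaintPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16).to("cuda")
    init_image = Image.open("init.png").convert("RGB").resize((512, 512))
    mask_image = Image.open("mask.png").convert("RGB").resize((512, 512))
    result = pipe(prompt="Face of a yellow cat", image=init_image, mask_image=mask_image).images[0]
    result.save("inpainted.png")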
| 12 | 0 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type( model_name_or_path ):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths( metric_fn , prediction , ground_truths ):
    return max(metric_fn(prediction , gt ) for gt in ground_truths )
def get_scores( args , preds_path , gold_data_path ):
    hypos = [line.strip() for line in open(preds_path , '''r''' ).readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path , sep='''\t''' , header=None )
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list )
            answers.append(ground_truths )
    else:
        references = [line.strip() for line in open(gold_data_path , '''r''' ).readlines()]
        answers = [[reference] for reference in references]
    fa = em = total = 0
    for prediction, ground_truths in zip(hypos , answers ):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score , prediction , ground_truths )
        fa += metric_max_over_ground_truths(fa_score , prediction , ground_truths )
    em = 100.0 * em / total
    fa = 100.0 * fa / total
    logger.info(f"""F1: {fa:.2f}""" )
    logger.info(f"""EM: {em:.2f}""" )
def get_precision_at_k( args , preds_path , gold_data_path ):
    k = args.k
    hypos = [line.strip() for line in open(preds_path , '''r''' ).readlines()]
    references = [line.strip() for line in open(gold_data_path , '''r''' ).readlines()]
    em = total = 0
    for hypo, reference in zip(hypos , references ):
        hypo_provenance = set(hypo.split('''\t''' )[:k] )
        ref_provenance = set(reference.split('''\t''' ) )
        total += 1
        em += len(hypo_provenance & ref_provenance ) / k
    em = 100.0 * em / total
    logger.info(f"""Precision@{k}: {em: .2f}""" )
def evaluate_batch_retrieval( args , rag_model , questions ):
    def strip_title( title ):
        if title.startswith('''"''' ):
            title = title[1:]
        if title.endswith('''"''' ):
            title = title[:-1]
        return title
    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions , return_tensors='''pt''' , padding=True , truncation=True , )['''input_ids'''].to(args.device )
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids )
    question_enc_pool_output = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids , question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title ) for title in docs['''title''']]
        provenance_strings.append('''\t'''.join(provenance ) )
    return provenance_strings
def evaluate_batch_e2e( args , rag_model , questions ):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions , return_tensors='''pt''' , padding=True , truncation=True )
        input_ids = inputs_dict.input_ids.to(args.device )
        attention_mask = inputs_dict.attention_mask.to(args.device )
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids , attention_mask=attention_mask , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=False , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs , skip_special_tokens=True )
        if args.print_predictions:
            for q, a in zip(questions , answers ):
                logger.info('''Q: {} - A: {}'''.format(q , a ) )
        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=str , help=(
            '''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
            ''' model_name_or_path'''
        ) , )
    parser.add_argument(
        '''--index_name''' , default=None , choices=['''exact''', '''compressed''', '''legacy'''] , type=str , help='''RAG model retriever type''' , )
    parser.add_argument(
        '''--index_path''' , default=None , type=str , help='''Path to the retrieval index''' , )
    parser.add_argument('''--n_docs''' , default=5 , type=int , help='''Number of retrieved docs''' )
    parser.add_argument(
        '''--model_name_or_path''' , default=None , type=str , required=True , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
    parser.add_argument(
        '''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=str , help=(
            '''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
            ''' precision@k.'''
        ) , )
    parser.add_argument('''--k''' , default=1 , type=int , help='''k for the precision@k calculation''' )
    parser.add_argument(
        '''--evaluation_set''' , default=None , type=str , required=True , help='''Path to a file containing evaluation samples''' , )
    parser.add_argument(
        '''--gold_data_path''' , default=None , type=str , required=True , help='''Path to a tab-separated file with gold samples''' , )
    parser.add_argument(
        '''--gold_data_mode''' , default='''qa''' , type=str , choices=['''qa''', '''ans'''] , help=(
            '''Format of the gold data file'''
            '''qa - a single line in the following format: question [tab] answer_list'''
            '''ans - a single line of the gold file contains the expected answer string'''
        ) , )
    parser.add_argument(
        '''--predictions_path''' , type=str , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
    parser.add_argument(
        '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number''' , )
    parser.add_argument(
        '''--eval_batch_size''' , default=8 , type=int , help='''Batch size per GPU/CPU for evaluation.''' , )
    parser.add_argument(
        '''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
    parser.add_argument(
        '''--num_beams''' , default=4 , type=int , help='''Number of beams to be used when generating answers''' , )
    parser.add_argument('''--min_length''' , default=1 , type=int , help='''Min length of the generated answers''' )
    parser.add_argument('''--max_length''' , default=50 , type=int , help='''Max length of the generated answers''' )
    parser.add_argument(
        '''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
    parser.add_argument(
        '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retried while generating.''' , )
    args = parser.parse_args()
    args.device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
    return args
def main( args ):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path )
        assert args.model_type is not None
    if args.model_type.startswith('''rag''' ):
        model_class = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
        model_kwargs['''n_docs'''] = args.n_docs
        if args.index_name is not None:
            model_kwargs['''index_name'''] = args.index_name
        if args.index_path is not None:
            model_kwargs['''index_path'''] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info('''Evaluate the following checkpoints: %s''' , checkpoints )
    score_fn = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path ) and (not args.recalculate):
            logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
            score_fn(args , args.predictions_path , args.gold_data_path )
            continue
        logger.info('''***** Running evaluation for {} *****'''.format(checkpoint ) )
        logger.info('''  Batch size = %d''' , args.eval_batch_size )
        logger.info('''  Predictions will be stored under {}'''.format(args.predictions_path ) )
        if args.model_type.startswith('''rag''' ):
            retriever = RagRetriever.from_pretrained(checkpoint , **model_kwargs )
            model = model_class.from_pretrained(checkpoint , retriever=retriever , **model_kwargs )
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint , **model_kwargs )
        model.to(args.device )
        with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
            questions = []
            for line in tqdm(eval_file ):
                questions.append(line.strip() )
                if len(questions ) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args , model , questions )
                    preds_file.write('''\n'''.join(answers ) + '''\n''' )
                    preds_file.flush()
                    questions = []
            if len(questions ) > 0:
                answers = evaluate_batch_fn(args , model , questions )
                preds_file.write('''\n'''.join(answers ) )
                preds_file.flush()
            score_fn(args , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
    args = get_args()
main(args)
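# Example invocation (added; paths and model id are placeholders):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-token-nq \
#       --model_type rag_token \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold_data \
#       --predictions_path path/to/e2e_preds.txt \
#       --eval_mode e2e \
#       --gold_data_mode qa \
#       --n_docs 5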
| 24 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mluke'] = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
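# Note (added): this is the standard lazy-import pattern used across the library.
# A minimal conceptual sketch of what _LazyModule does (names here are
# illustrative, not the real implementation):
#
#   class _LazyModule(types.ModuleType):
#       def __getattr__(self, name):
#           # import the owning submodule only on first attribute access
#           module = importlib.import_module("." + self._module_for(name), self.__name__)
#           return getattr(module, name)
#
# so `import datasets`/`import transformers` stays cheap and MLukeTokenizer is
# only imported when first referenced.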
| 12 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_beit'] = ['BeitFeatureExtractor']
    _import_structure['image_processing_beit'] = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_beit'] = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_beit'] = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 25 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)
ORT_TO_NP_TYPE = {
    'tensor(bool)': np.bool_,
    'tensor(int8)': np.int8,
    'tensor(uint8)': np.uint8,
    'tensor(int16)': np.int16,
    'tensor(uint16)': np.uint16,
    'tensor(int32)': np.int32,
    'tensor(uint32)': np.uint32,
    'tensor(int64)': np.int64,
    'tensor(uint64)': np.uint64,
    'tensor(float16)': np.float16,
    'tensor(float)': np.float32,
    'tensor(double)': np.float64,
}
class OnnxRuntimeModel:
    def __init__( self , model=None , **kwargs ):
        logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
        self.model = model
        self.model_save_dir = kwargs.get("""model_save_dir""" , None )
        self.latest_model_name = kwargs.get("""latest_model_name""" , ONNX_WEIGHTS_NAME )
    def __call__( self , **kwargs ):
        inputs = {k: np.array(v ) for k, v in kwargs.items()}
        return self.model.run(None , inputs )
    @staticmethod
    def load_model( path: Union[str, Path] , provider=None , sess_options=None ):
        if provider is None:
            logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
            provider = """CPUExecutionProvider"""
        return ort.InferenceSession(path , providers=[provider] , sess_options=sess_options )
    def _save_pretrained( self , save_directory: Union[str, Path] , file_name: Optional[str] = None , **kwargs ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name )
        dst_path = Path(save_directory ).joinpath(model_file_name )
        try:
            shutil.copyfile(src_path , dst_path )
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
        if src_path.exists():
            dst_path = Path(save_directory ).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
            try:
                shutil.copyfile(src_path , dst_path )
            except shutil.SameFileError:
                pass
    def save_pretrained( self , save_directory: Union[str, os.PathLike] , **kwargs ):
        if os.path.isfile(save_directory ):
            logger.error(F'Provided path ({save_directory}) should be a directory, not a file' )
            return
        os.makedirs(save_directory , exist_ok=True )
        # saving model weights/files
        self._save_pretrained(save_directory , **kwargs )
    @classmethod
    def _from_pretrained( cls , model_id: Union[str, Path] , use_auth_token: Optional[Union[bool, str, None]] = None , revision: Optional[Union[str, None]] = None , force_download: bool = False , cache_dir: Optional[str] = None , file_name: Optional[str] = None , provider: Optional[str] = None , sess_options: Optional["ort.SessionOptions"] = None , **kwargs ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id ):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id , model_file_name ) , provider=provider , sess_options=sess_options )
            kwargs["""model_save_dir"""] = Path(model_id )
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id , filename=model_file_name , use_auth_token=use_auth_token , revision=revision , cache_dir=cache_dir , force_download=force_download , )
            kwargs["""model_save_dir"""] = Path(model_cache_path ).parent
            kwargs["""latest_model_name"""] = Path(model_cache_path ).name
            model = OnnxRuntimeModel.load_model(model_cache_path , provider=provider , sess_options=sess_options )
        return cls(model=model , **kwargs )
    @classmethod
    def from_pretrained( cls , model_id: Union[str, Path] , force_download: bool = True , use_auth_token: Optional[str] = None , cache_dir: Optional[str] = None , **model_kwargs ):
        revision = None
        if len(str(model_id ).split("""@""" ) ) == 2:
            model_id, revision = model_id.split("""@""" )
        return cls._from_pretrained(
            model_id=model_id , revision=revision , cache_dir=cache_dir , force_download=force_download , use_auth_token=use_auth_token , **model_kwargs , )
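# Hedged usage sketch (added): loading a session and running it through the wrapper
# above; the path and the input name "sample" are placeholders that must match the
# exported ONNX graph.
if __name__ == "__main__":
    onnx_model = OnnxRuntimeModel.from_pretrained("/path/to/onnx_model_dir", provider="CPUExecutionProvider")
    outputs = onnx_model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))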
| 12 | 0 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple( max_perimeter ):
    triplets: typing.Counter[int] = Counter()
    for base in range(1 , max_perimeter + 1 ):
        for perpendicular in range(base , max_perimeter + 1 ):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse ):
                perimeter = int(base + perpendicular + hypotenuse )
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
def solution( max_perimeter = 1000 ):
    triplets = pythagorean_triple(max_perimeter )
    return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f"""Perimeter {solution()} has maximum solutions""")
| 26 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vision_encoder_decoder'] = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vision_encoder_decoder'] = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vision_encoder_decoder'] = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 12 | 0 |
'''simple docstring'''
import math
class SelfOrganizingMap :
    def get_winner( self , weights , sample ):
        '''simple docstring'''
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample ) ):
            d0 += math.pow((sample[i] - weights[0][i]) , 2 )
            d1 += math.pow((sample[i] - weights[1][i]) , 2 )
            return 0 if d0 > d1 else 1
        return 0
    def update( self , weights , sample , j , alpha ):
        '''simple docstring'''
        for i in range(len(weights ) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main():
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights , sample )
            # Update the winning vector
            weights = self_organizing_map.update(weights , sample , winner , alpha )
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights , sample )
    # results
    print(F"""Clusters that the test sample belongs to : {winner}""" )
    print(F"""Weights that have been trained : {weights}""" )
# running the main() function
if __name__ == "__main__":
main()
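    # Numeric illustration (added): a single update step moves the winning weight
    # vector toward the sample by a fraction alpha: w_j <- w_j + alpha * (x - w_j).
    _som = SelfOrganizingMap()
    assert _som.update([[0.0, 0.0], [1.0, 1.0]], [1.0, 0.0], 0, 0.5)[0] == [0.5, 0.0]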
| 27 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model( fsdp_plugin , accelerator , model , output_dir , model_index=0 ):
    '''simple docstring'''
    os.makedirs(output_dir , exist_ok=True )
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
            output_model_file = os.path.join(output_dir , weights_name )
            if accelerator.process_index == 0:
                logger.info(f'Saving model to {output_model_file}' )
                torch.save(state_dict , output_model_file )
                logger.info(f'Model saved to {output_model_file}' )
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
                if model_index == 0
                else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
            )
            output_model_file = os.path.join(output_dir , weights_name )
            logger.info(f'Saving model to {output_model_file}' )
            torch.save(state_dict , output_model_file )
            logger.info(f'Model saved to {output_model_file}' )
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir , f'{MODEL_NAME}_{model_index}' )
            os.makedirs(ckpt_dir , exist_ok=True )
            logger.info(f'Saving model to {ckpt_dir}' )
            state_dict = {"""model""": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict , storage_writer=dist_cp.FileSystemWriter(ckpt_dir ) , planner=DefaultSavePlanner() , )
            logger.info(f'Model saved to {ckpt_dir}' )
def load_fsdp_model( fsdp_plugin , accelerator , model , input_dir , model_index=0 ):
    '''simple docstring'''
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model ) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        """Set the `sync_module_states` flag to `True` so that model states are synced across processes when """
                        """initializing FSDP object""" )
                return
            weights_name = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
            input_model_file = os.path.join(input_dir , weights_name )
            logger.info(f'Loading model from {input_model_file}' )
            state_dict = torch.load(input_model_file )
            logger.info(f'Model loaded from {input_model_file}' )
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
                if model_index == 0
                else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
            )
            input_model_file = os.path.join(input_dir , weights_name )
            logger.info(f'Loading model from {input_model_file}' )
            state_dict = torch.load(input_model_file )
            logger.info(f'Model loaded from {input_model_file}' )
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir , f'{MODEL_NAME}_{model_index}' )
                if f'{MODEL_NAME}' not in input_dir
                else input_dir
            )
            logger.info(f'Loading model from {ckpt_dir}' )
            state_dict = {"""model""": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict , storage_reader=dist_cp.FileSystemReader(ckpt_dir ) , planner=DefaultLoadPlanner() , )
            state_dict = state_dict["""model"""]
            logger.info(f'Model loaded from {ckpt_dir}' )
        model.load_state_dict(state_dict )
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    """Save the optimizer state of an FSDP-wrapped model."""
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f'Saving Optimizer state to {output_optimizer_file}')
                torch.save(optim_state, output_optimizer_file)
                logger.info(f'Optimizer state saved in {output_optimizer_file}')
        else:
            ckpt_dir = os.path.join(output_dir, f'{OPTIMIZER_NAME}_{optimizer_index}')
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f'Saving Optimizer state to {ckpt_dir}')
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f'Optimizer state saved in {ckpt_dir}')
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    """Load an optimizer state written by `save_fsdp_optimizer` and restore it into `optimizer`."""
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly pytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f'Loading Optimizer state from {input_optimizer_file}')
            optim_state = torch.load(input_optimizer_file)
            logger.info(f'Optimizer state loaded from {input_optimizer_file}')
        else:
            ckpt_dir = (
                os.path.join(input_dir, f'{OPTIMIZER_NAME}_{optimizer_index}')
                if f'{OPTIMIZER_NAME}' not in input_dir
                else input_dir
            )
            logger.info(f'Loading Optimizer from {ckpt_dir}')
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f'Optimizer loaded from {ckpt_dir}')
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
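
# Illustrative usage sketch (assumed call sites, not part of this module):
# inside `Accelerator.save_state` / `Accelerator.load_state`, these helpers are
# invoked once per wrapped model and optimizer, e.g.:
#
#   fsdp_plugin = accelerator.state.fsdp_plugin
#   save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0)
#   save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0)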
| 12 | 0 |
"""Count the set bits of an integer two ways and benchmark both implementations."""
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """
    Count the set bits in a non-negative integer by repeatedly
    clearing the lowest set bit (Brian Kernighan's algorithm).

    >>> get_set_bits_count_using_brian_kernighans_algorithm(25)
    3
    >>> get_set_bits_count_using_brian_kernighans_algorithm(0)
    0
    """
    if number < 0:
        raise ValueError('the value of input must not be negative')
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """
    Count the set bits in a non-negative integer by testing the
    lowest bit with the modulo operator and shifting right.

    >>> get_set_bits_count_using_modulo_operator(25)
    3
    >>> get_set_bits_count_using_modulo_operator(0)
    0
    """
    if number < 0:
        raise ValueError('the value of input must not be negative')
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    """Benchmark the two implementations with inputs of different bit lengths."""

    def do_benchmark(number: int) -> None:
        setup = 'import __main__ as z'
        print(F"""Benchmark when {number = }:""")
        print(F"""{get_set_bits_count_using_modulo_operator(number) = }""")
        timing = timeit(F"""z.get_set_bits_count_using_modulo_operator({number})""", setup=setup)
        print(F"""timeit() runs in {timing} seconds""")
        print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(number) = }""")
        timing = timeit(
            F"""z.get_set_bits_count_using_brian_kernighans_algorithm({number})""", setup=setup, )
        print(F"""timeit() runs in {timing} seconds""")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
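
# Why Kernighan's trick terminates in one iteration per set bit (added note):
# n & (n - 1) clears the lowest set bit of n, e.g.
#   n           = 0b101100
#   n - 1       = 0b101011
#   n & (n - 1) = 0b101000
# so the first loop runs popcount(n) times, versus bit_length(n) times for the
# modulo/shift variant.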
| 28 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImgaImgPipeline
    params = ['image']
    batch_params = ['image']
    required_optional_params = [
        'num_images_per_prompt',
        'num_inference_steps',
        'generator',
        'latents',
        'guidance_scale',
        'frame_size',
        'output_type',
        'return_dict',
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, )
        return CLIPVisionModel(config)

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, )
        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            """num_attention_heads""": 2,
            """attention_head_dim""": 16,
            """embedding_dim""": self.time_input_dim,
            """num_embeddings""": 32,
            """embedding_proj_dim""": self.text_embedder_hidden_size,
            """time_embed_dim""": self.time_embed_dim,
            """num_layers""": 1,
            """clip_embed_dim""": self.time_input_dim * 2,
            """additional_embeddings""": 0,
            """time_embed_act_fn""": """gelu""",
            """norm_in_type""": """layer""",
            """embedding_proj_norm_type""": """layer""",
            """encoder_hid_proj_type""": None,
            """added_emb_type""": None,
        }
        return PriorTransformer(**model_kwargs)

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            """param_shapes""": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            """d_latent""": self.time_input_dim,
            """d_hidden""": self.renderer_dim,
            """n_output""": 12,
            """background""": (
                0.1,
                0.1,
                0.1,
            ),
        }
        return ShapERenderer(**model_kwargs)

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
            """prior""": prior,
            """image_encoder""": image_encoder,
            """image_processor""": image_processor,
            """renderer""": renderer,
            """scheduler""": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("""mps"""):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            """image""": input_image,
            """generator""": generator,
            """num_inference_steps""": 1,
            """frame_size""": 32,
            """output_type""": """np""",
        }
        return inputs

    def test_shap_e(self):
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == """cpu"""
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/shap_e/test_shap_e_img2img_out.npy""" )
        pipe = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image , generator=generator , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
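
# Hedged end-to-end sketch (mirrors the slow test above; requires a GPU and
# network access to download the checkpoint):
#
#   pipe = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img").to("cuda")
#   images = pipe(input_image, guidance_scale=3.0, num_inference_steps=64,
#                 frame_size=64, output_type="np").images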
| 12 | 0 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = 'audio'
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column='audio', label_column='label')
AUDIO_EXTENSIONS = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
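
# Hedged usage sketch (assumed, not part of this module): this builder backs
# the packaged "audiofolder" loader, e.g.
#
#   from datasets import load_dataset
#   ds = load_dataset("audiofolder", data_dir="/path/to/folder")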
| 29 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n        ```py\n        >>> import torch\n        >>> import numpy as np\n\n        >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n        >>> from transformers import pipeline\n        >>> from diffusers.utils import load_image\n\n\n        >>> def make_hint(image, depth_estimator):\n        ...     image = depth_estimator(image)["depth"]\n        ...     image = np.array(image)\n        ...     image = image[:, :, None]\n        ...     image = np.concatenate([image, image, image], axis=2)\n        ...     detected_map = torch.from_numpy(image).float() / 255.0\n        ...     hint = detected_map.permute(2, 0, 1)\n        ...     return hint\n\n\n        >>> depth_estimator = pipeline("depth-estimation")\n\n        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n        ... )\n        >>> pipe_prior = pipe_prior.to("cuda")\n\n        >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n        ...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n        ... )\n        >>> pipe = pipe.to("cuda")\n\n\n        >>> img = load_image(\n        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n        ...     "/kandinsky/cat.png"\n        ... ).resize((768, 768))\n\n        >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n        >>> prompt = "A robot, 4k photo"\n        >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n        >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n        >>> image_emb, zero_image_emb = pipe_prior(\n        ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n        ... ).to_tuple()\n\n        >>> images = pipe(\n        ...     image_embeds=image_emb,\n        ...     negative_image_embeds=zero_image_emb,\n        ...     hint=hint,\n        ...     num_inference_steps=50,\n        ...     generator=generator,\n        ...     height=768,\n        ...     width=768,\n        ... ).images\n\n        >>> images[0].save("robot_cat.png")\n        ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    """Round the requested image size up to the nearest size the latent grid supports."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
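
# Worked example (added note): with the default scale_factor=8, a 768x768
# request maps to a 96x96 latent size: 768 // 8**2 = 12 with no remainder, and
# 12 * 8 = 96. Sizes that are not multiples of 64 are rounded up by one block.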
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(self, unet: UNetaDConditionModel, scheduler: DDPMScheduler, movq: VQModel, ):
        super().__init__()

        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}')
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""")

        device = torch.device(F'cuda:{gpu_id}')

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0"""):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""")

        device = torch.device(F'cuda:{gpu_id}')

        if self.device.type != "cpu":
            self.to("""cpu""" , silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet , """_hf_hook"""):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , """_hf_hook""")
                and hasattr(module._hf_hook , """execution_device""")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self ,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] ,
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] ,
        hint: torch.FloatTensor ,
        height: int = 512 ,
        width: int = 512 ,
        num_inference_steps: int = 100 ,
        guidance_scale: float = 4.0 ,
        num_images_per_prompt: int = 1 ,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None ,
        latents: Optional[torch.FloatTensor] = None ,
        output_type: Optional[str] = "pil" ,
        return_dict: bool = True ,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"""image_embeds""": image_embeds, """hint""": hint}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config , """variance_type""")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["""sample"""]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}')

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 12 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig(PretrainedConfig):
    model_type = 'xmod'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
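
# Hedged usage sketch (assumed public API): instantiating a default
# configuration and a model from it.
#
#   from transformers import XmodConfig, XmodModel
#   configuration = XmodConfig()
#   model = XmodModel(configuration)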
| 30 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """attention_mask""": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""google/bigbird-roberta-base""")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("""JIT Enabled"""):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("""JIT Disabled"""):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1E-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("""outputs.attentions"""):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
| 12 | 0 |
"""Test for Prim's minimum spanning tree algorithm."""
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    """Prim's algorithm should recover the known minimum spanning tree of this graph."""
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
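
# Note (added): `mst` is expected to return the MST edges as [node, node, cost]
# triples; because the graph is undirected, the final loop accepts either
# orientation of each expected edge, e.g. (7, 6) or (6, 7).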
| 31 |
def odd_even_transposition(arr: list) -> list:
    """
    Sort a list in place with odd-even transposition (parallel bubble) sort.

    >>> odd_even_transposition([5, 4, 3, 2, 1])
    [1, 2, 3, 4, 5]
    """
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i + 1], arr[i] = arr[i], arr[i + 1]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
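
# Note (added): odd-even transposition sort is a parallel-friendly bubble sort.
# Each of the n phases compares disjoint pairs (odd- or even-indexed), so one
# phase can run in O(1) time on n/2 comparators; run sequentially it is O(n^2).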
| 12 | 0 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """Convert an original mLUKE checkpoint into the Transformers format."""
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata['model_config'])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location='cpu')['module']

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab['[MASK2]'] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_a = AddedToken('<ent>', lstrip=False, rstrip=False)
    entity_token_a2 = AddedToken('<ent2>', lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a2]})
    config.vocab_size += 2

    print(F"""Saving tokenizer to {pytorch_dump_folder_path}""")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, 'tokenizer_config.json'), 'r') as f:
        tokenizer_config = json.load(f)
    tokenizer_config['tokenizer_class'] = 'MLukeTokenizer'
    with open(os.path.join(pytorch_dump_folder_path, 'tokenizer_config.json'), 'w') as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names['entity_vocab_file']), 'w') as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(['@'])[0]
    enta_init_index = tokenizer.convert_tokens_to_ids(['#'])[0]

    word_emb = state_dict['embeddings.word_embeddings.weight']
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    enta_emb = word_emb[enta_init_index].unsqueeze(0)
    state_dict['embeddings.word_embeddings.weight'] = torch.cat([word_emb, ent_emb, enta_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        enta_decoder_bias = decoder_bias[enta_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = F"""encoder.layer.{layer_index}.attention.self."""
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict['entity_embeddings.entity_embeddings.weight']
    entity_mask_emb = entity_emb[entity_vocab['[MASK]']].unsqueeze(0)
    state_dict['entity_embeddings.entity_embeddings.weight'] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict['entity_predictions.bias']
    entity_mask_bias = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0)
    state_dict['entity_predictions.bias'] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop('entity_predictions.decoder.weight')
    state_dict.pop('lm_head.decoder.weight')
    state_dict.pop('lm_head.decoder.bias')
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith('lm_head') or key.startswith('entity_predictions')):
            new_state_dict['luke.' + key] = state_dict[key]
        else:
            new_state_dict[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(F"""Unexpected missing_keys: {missing_keys}""")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task='entity_classification')

    text = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors='pt')

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 7_68))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""")
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 7_68))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
            F""" {expected_shape}""")
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = 'Tokyo is the capital of <mask>.'
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors='pt')

    outputs = model(**encoding)

    input_ids = encoding['input_ids'][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>'))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith('en:')][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print('Saving PyTorch model to {}'.format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    """Build a flat `name -> id` mapping from the original multilingual entity vocab file."""
    SPECIAL_TOKENS = ['[MASK]', '[PAD]', '[UNK]']

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry['id']
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[F"""{language}:{entity_name}"""] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
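
# Hedged CLI sketch (file paths and the script file name are placeholders):
#
#   python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./mluke/pytorch_model.bin \
#       --metadata_path ./mluke/metadata.json \
#       --entity_vocab_path ./mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base-converted \
#       --model_size base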
| 32 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
UpperCAmelCase_ = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(F'Provided path ({save_directory}) should be a directory, not a file')
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, """question_encoder_tokenizer""")
        generator_path = os.path.join(save_directory, """generator_tokenizer""")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("""config""", None)

        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="""question_encoder_tokenizer""")
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="""generator_tokenizer""")
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(self, src_texts: List[str], tgt_texts: Optional[List[str]] = None, max_length: Optional[int] = None, max_target_length: Optional[int] = None, padding: str = "longest", return_tensors: str = None, truncation: bool = True, **kwargs, ) -> BatchEncoding:
        warnings.warn(
            """`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
            """regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
            """context manager to prepare your targets. See the documentation of your specific tokenizer for more """
            """details""", FutureWarning, )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs, )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=truncation, **kwargs, )
        model_inputs["""labels"""] = labels["""input_ids"""]
        return model_inputs
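
# Hedged usage sketch (assumed checkpoint name): the composite tokenizer routes
# inputs to the question-encoder tokenizer and targets to the generator one.
#
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#   inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")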
| 12 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Return the shape of `tensor`, preferring static dimensions and falling back to dynamic ones."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
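
# Example (added note): for a tensor with static shape (2, None, 768),
# shape_list returns [2, <scalar tf.Tensor>, 768] -- Python ints for known
# dimensions and dynamic scalar tensors for unknown ones, which keeps
# downstream reshapes graph-compatible.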
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    """Wrapper around tf.nn.softmax that adds a tiny epsilon to the logits (works around an XLA issue)."""
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''')

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon, )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor, tf.cast(embed_dim, dtype=tensor.dtype), message=(
            F'''The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding '''
            F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
        ), )
def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            '''The following attributes cannot be saved to HDF5 file because '''
            F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
            F'''bytes: {bad_attributes}''')

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs['%s%d' % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode('''utf8''') if hasattr(n, '''decode''') else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode('''utf8''') if hasattr(n, '''decode''') else n for n in group.attrs['''%s%d''' % (name, chunk_id)]])
            chunk_id += 1
    return data
def expand_1d(data):
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
| 33 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
payload = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': f"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'emoji': True,
},
}
]
total_num_failed = 0
for log in Path().glob('*.log'):
    section_num_failed = 0
    with open(log, 'r') as f:
        for line in f:
            line = json.loads(line)
            if line.get('nodeid', '') != "":
                test = line['nodeid']
                if line.get('duration', None) is not None:
                    duration = f"""{line["duration"]:.4f}"""
                    if line.get('outcome', '') == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split('_')[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
message = ''
all_filesafailed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split('::')
                data[0] = data[0].split('/')[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
                table,
                headers=['Test Location', 'Num Failed'],
                tablefmt=hf_table_format,
                stralign='right',
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3_000:
        err = 'Too many failed tests, please see the full report in the Action results.'
        offset = len(err) + 10
        message = message[: 3_000 - offset] + f"""\n...\n```\n{err}"""
    print(f"""### {message}""")
else:
    message = 'No failed tests! 🤗'
    print(f"""## {message}""")
    payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ['SLACK_API_TOKEN'])
    if message != "No failed tests! 🤗":
        md_report = {
            'type': 'section',
            'text': {
                'type': 'mrkdwn',
                'text': message,
            },
        }
        payload.append(md_report)
        action_button = {
            'type': 'section',
            'text': {
                'type': 'mrkdwn',
                'text': '*For more details:*',
            },
            'accessory': {
                'type': 'button',
                'text': {
                    'type': 'plain_text',
                    'text': 'Check Action results',
                    'emoji': True,
                },
                'url': f"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
            },
        }
        payload.append(action_button)
    date_report = {
        'type': 'context',
        'elements': [
            {
                'type': 'plain_text',
                'text': f"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
    ts = response.data['ts']
    for failed_file in all_filesafailed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ''
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ''
            payload = {
                'type': 'section',
                'text': {
                    'type': 'mrkdwn',
                    'text': f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
                },
            }
            client.chat_postMessage(
                channel='#accelerate-ci-daily',
                thread_ts=ts,
                blocks=[payload],
            )
| 12 | 0 |
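The script above consumes pytest `--report-log` output, where every line is a standalone JSON object, and turns the failures into Slack Block Kit payloads. A minimal sketch of the per-line parsing it relies on, assuming a hypothetical log produced with `pytest --report-log=sample.log`:

import json
from pathlib import Path

# Hypothetical report-log file written by `pytest --report-log=sample.log`.
failed = []
for raw in Path("sample.log").read_text().splitlines():
    entry = json.loads(raw)  # each line of a report-log is one JSON object
    # Only TestReport entries carry a `nodeid`; collection records do not.
    if entry.get("nodeid", "") and entry.get("outcome", "") == "failed":
        failed.append((entry["nodeid"], f"{entry.get('duration', 0.0):.4f}"))
print(f"{len(failed)} failed tests")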
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A =logging.get_logger(__name__)
A ={
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _a ( __a , __a ):
__a : Optional[Any] = """swin"""
__a : Optional[Any] = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : str , lowercase : List[Any]=224 , lowercase : Union[str, Any]=4 , lowercase : str=3 , lowercase : Union[str, Any]=96 , lowercase : Any=[2, 2, 6, 2] , lowercase : Tuple=[3, 6, 12, 24] , lowercase : Any=7 , lowercase : Optional[Any]=4.0 , lowercase : int=True , lowercase : int=0.0 , lowercase : Optional[int]=0.0 , lowercase : Dict=0.1 , lowercase : Tuple="gelu" , lowercase : Any=False , lowercase : int=0.02 , lowercase : List[str]=1E-5 , lowercase : int=32 , lowercase : Any=None , lowercase : str=None , **lowercase : List[Any] , ):
'''simple docstring'''
super().__init__(**lowercase )
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = embed_dim
UpperCAmelCase = depths
UpperCAmelCase = len(lowercase )
UpperCAmelCase = num_heads
UpperCAmelCase = window_size
UpperCAmelCase = mlp_ratio
UpperCAmelCase = qkv_bias
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = drop_path_rate
UpperCAmelCase = hidden_act
UpperCAmelCase = use_absolute_embeddings
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = initializer_range
UpperCAmelCase = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase = int(embed_dim * 2 ** (len(lowercase ) - 1) )
UpperCAmelCase = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(lowercase ) + 1 )]
UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=lowercase , out_indices=lowercase , stage_names=self.stage_names )
class _a ( __a ):
__a : Optional[Any] = version.parse("""1.11""" )
@property
def A ( self : Tuple ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def A ( self : int ):
'''simple docstring'''
return 1E-4
| 34 |
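A minimal usage sketch, assuming the class above corresponds to `transformers.SwinConfig`: with the defaults shown, the channel dimension after the last stage is embed_dim * 2 ** (len(depths) - 1) = 96 * 8 = 768.

from transformers import SwinConfig

config = SwinConfig()  # defaults: embed_dim=96, depths=[2, 2, 6, 2]
# hidden_size is derived from the last stage, exactly as in __init__ above
assert config.hidden_size == config.embed_dim * 2 ** (len(config.depths) - 1)  # 768
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']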
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
@register_to_config
def __init__( self: Optional[Any] , UpperCamelCase_: bool , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[int] = None ):
super().__init__()
__lowerCamelCase = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
__lowerCamelCase = torch.zeros(UpperCamelCase_ , UpperCamelCase_ )
else:
__lowerCamelCase = None
__lowerCamelCase = torch.nn.Parameter(UpperCamelCase_ )
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : VQModel
UpperCAmelCase__ : CLIPTextModel
UpperCAmelCase__ : CLIPTokenizer
UpperCAmelCase__ : TransformeraDModel
UpperCAmelCase__ : LearnedClassifierFreeSamplingEmbeddings
UpperCAmelCase__ : VQDiffusionScheduler
def __init__( self: str , UpperCamelCase_: VQModel , UpperCamelCase_: CLIPTextModel , UpperCamelCase_: CLIPTokenizer , UpperCamelCase_: TransformeraDModel , UpperCamelCase_: VQDiffusionScheduler , UpperCamelCase_: LearnedClassifierFreeSamplingEmbeddings , ):
super().__init__()
self.register_modules(
vqvae=UpperCamelCase_ , transformer=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , scheduler=UpperCamelCase_ , learned_classifier_free_sampling_embeddings=UpperCamelCase_ , )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: Any , UpperCamelCase_: List[Any] , UpperCamelCase_: Any ):
__lowerCamelCase = len(UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else 1
# get prompt text embeddings
__lowerCamelCase = self.tokenizer(
UpperCamelCase_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__lowerCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__lowerCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
__lowerCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
__lowerCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
__lowerCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase_ )
# duplicate text embeddings for each generation per prompt
__lowerCamelCase = prompt_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
__lowerCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
__lowerCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(UpperCamelCase_ , 1 , 1 )
else:
__lowerCamelCase = [""""""] * batch_size
__lowerCamelCase = text_input_ids.shape[-1]
__lowerCamelCase = self.tokenizer(
UpperCamelCase_ , padding="""max_length""" , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors="""pt""" , )
__lowerCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
__lowerCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowerCamelCase = negative_prompt_embeds.shape[1]
__lowerCamelCase = negative_prompt_embeds.repeat(1 , UpperCamelCase_ , 1 )
__lowerCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , UpperCamelCase_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self: Tuple , UpperCamelCase_: Union[str, List[str]] , UpperCamelCase_: int = 1_00 , UpperCamelCase_: float = 5.0 , UpperCamelCase_: float = 1.0 , UpperCamelCase_: int = 1 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase_: int = 1 , ):
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = 1
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = len(UpperCamelCase_ )
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase_ )}' )
__lowerCamelCase = batch_size * num_images_per_prompt
__lowerCamelCase = guidance_scale > 1.0
__lowerCamelCase = self._encode_prompt(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(UpperCamelCase_ )}.' )
# get the initial completely masked latents unless the user supplied it
__lowerCamelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
__lowerCamelCase = self.transformer.num_vector_embeds - 1
__lowerCamelCase = torch.full(UpperCamelCase_ , UpperCamelCase_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"""Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
F' {self.transformer.num_vector_embeds - 1} (inclusive).' )
__lowerCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(UpperCamelCase_ , device=self.device )
__lowerCamelCase = self.scheduler.timesteps.to(self.device )
__lowerCamelCase = latents
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the sample if we are doing classifier free guidance
__lowerCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
__lowerCamelCase = self.transformer(UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , timestep=UpperCamelCase_ ).sample
if do_classifier_free_guidance:
__lowerCamelCase, __lowerCamelCase = model_output.chunk(2 )
__lowerCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(UpperCamelCase_ , dim=1 , keepdim=UpperCamelCase_ )
__lowerCamelCase = self.truncate(UpperCamelCase_ , UpperCamelCase_ )
# remove `log(0)`'s (`-inf`s)
__lowerCamelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase = self.scheduler.step(UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = self.vqvae.config.vq_embed_dim
__lowerCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
__lowerCamelCase = self.vqvae.quantize.get_codebook_entry(UpperCamelCase_ , shape=UpperCamelCase_ )
__lowerCamelCase = self.vqvae.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ ).sample
__lowerCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowerCamelCase = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: float ):
__lowerCamelCase, __lowerCamelCase = torch.sort(UpperCamelCase_ , 1 , descending=UpperCamelCase_ )
__lowerCamelCase = torch.exp(UpperCamelCase_ )
__lowerCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
__lowerCamelCase = torch.full_like(keep_mask[:, 0:1, :] , UpperCamelCase_ )
__lowerCamelCase = torch.cat((all_true, keep_mask) , dim=1 )
__lowerCamelCase = keep_mask[:, :-1, :]
__lowerCamelCase = keep_mask.gather(1 , indices.argsort(1 ) )
__lowerCamelCase = log_p_x_0.clone()
__lowerCamelCase = -torch.inf # -inf = log(0)
return rv
| 12 | 0 |
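The `truncate` method above keeps, per latent position, the smallest set of classes whose cumulative probability reaches `truncation_rate` and sets every other class to log(0). A toy sketch of the same idea on a single distribution, with made-up probabilities:

import torch

log_p = torch.log(torch.tensor([0.05, 0.60, 0.25, 0.10]))  # toy log-probs
sorted_p, indices = torch.sort(log_p.exp(), descending=True)
keep_sorted = sorted_p.cumsum(0) < 0.9  # truncation_rate = 0.9
# shift by one so the class that crosses the threshold is still kept
keep_sorted = torch.cat([torch.tensor([True]), keep_sorted[:-1]])
keep = keep_sorted.gather(0, indices.argsort(0))  # undo the sort
truncated = log_p.clone()
truncated[~keep] = -torch.inf  # -inf = log(0)
print(truncated.exp())  # tensor([0.0000, 0.6000, 0.2500, 0.1000])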
'''simple docstring'''
__a = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__a = frozenset(["prompt", "negative_prompt"])
__a = frozenset([])
__a = frozenset(["image"])
__a = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
__a = frozenset(["image"])
__a = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__a = frozenset(["prompt", "image", "negative_prompt"])
__a = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__a = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
__a = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__a = frozenset(["image", "mask_image"])
__a = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__a = frozenset(["example_image", "image", "mask_image"])
__a = frozenset(["class_labels"])
__a = frozenset(["class_labels"])
__a = frozenset(["batch_size"])
__a = frozenset([])
__a = frozenset(["batch_size"])
__a = frozenset([])
__a = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__a = frozenset(["prompt", "negative_prompt"])
__a = frozenset(["input_tokens"])
__a = frozenset(["input_tokens"])
| 35 |
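These frozensets enumerate the call arguments the shared pipeline tests expect for each task. A hedged sketch of how a test mixin might diff one of them against a pipeline's actual `__call__` signature (the pipeline class here is hypothetical):

import inspect

TEXT_TO_IMAGE_PARAMS = frozenset({"prompt", "height", "width", "guidance_scale"})

def missing_call_params(pipeline_cls, expected: frozenset) -> set:
    accepted = set(inspect.signature(pipeline_cls.__call__).parameters)
    return set(expected) - accepted

class DummyPipeline:  # hypothetical stand-in for a real pipeline
    def __call__(self, prompt, height=None, width=None, guidance_scale=7.5):
        ...

assert missing_call_params(DummyPipeline, TEXT_TO_IMAGE_PARAMS) == set()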
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Dict = DistilBertTokenizer
UpperCAmelCase__ : Dict = DistilBertTokenizerFast
UpperCAmelCase__ : Tuple = True
@slow
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
__lowerCamelCase = tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 12 | 0 |
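The assertions above encode DistilBERT's BERT-style templates: `[CLS] text [SEP]` for a single sequence and `[CLS] text [SEP] text_2 [SEP]` for a pair. A sketch of the expected shapes with made-up token ids (101/102 are the usual BERT-family [CLS]/[SEP] ids):

cls_id, sep_id = 101, 102
text = [7_000, 7_001]    # ids for "sequence builders" (illustrative)
text_2 = [8_000, 8_001]  # ids for "multi-sequence build" (illustrative)

single = [cls_id] + text + [sep_id]
pair = [cls_id] + text + [sep_id] + text_2 + [sep_id]
print(single)  # [101, 7000, 7001, 102]
print(pair)    # [101, 7000, 7001, 102, 8000, 8001, 102]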
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
_snake_case = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'})
lowerCamelCase__ = field(
default=a , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
lowerCamelCase__ = field(
default=a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowerCamelCase__ = field(
default=a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
lowerCamelCase__ = field(
default=a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(
default=a , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Evaluation language. Also train language if `train_language` is set to None.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Train language if it is different from the evaluation language.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
lowerCamelCase__ = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowerCamelCase__ = field(
default=a , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def A ( ):
'''simple docstring'''
_lowerCAmelCase : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : int = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_xnli" , _lowerCamelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_lowerCAmelCase : str = training_args.get_process_log_level()
logger.setLevel(_lowerCamelCase )
datasets.utils.logging.set_verbosity(_lowerCamelCase )
transformers.utils.logging.set_verbosity(_lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
_lowerCAmelCase : Optional[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowerCAmelCase : Tuple = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
_lowerCAmelCase : List[Any] = load_dataset(
"xnli" , model_args.language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
_lowerCAmelCase : Union[str, Any] = load_dataset(
"xnli" , model_args.train_language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
_lowerCAmelCase : Optional[Any] = train_dataset.features["label"].names
if training_args.do_eval:
_lowerCAmelCase : int = load_dataset(
"xnli" , model_args.language , split="validation" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
_lowerCAmelCase : Dict = eval_dataset.features["label"].names
if training_args.do_predict:
_lowerCAmelCase : Optional[Any] = load_dataset(
"xnli" , model_args.language , split="test" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
_lowerCAmelCase : List[str] = predict_dataset.features["label"].names
# Labels
_lowerCAmelCase : List[Any] = len(_lowerCamelCase )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCAmelCase : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowerCamelCase , idalabel={str(_lowerCamelCase ): label for i, label in enumerate(_lowerCamelCase )} , labelaid={label: i for i, label in enumerate(_lowerCamelCase )} , finetuning_task="xnli" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_lowerCAmelCase : Any = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
_lowerCAmelCase : List[Any] = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_lowerCAmelCase : Optional[Any] = False
def preprocess_function(_lowerCamelCase ):
# Tokenize the texts
return tokenizer(
examples["premise"] , examples["hypothesis"] , padding=_lowerCamelCase , max_length=data_args.max_seq_length , truncation=_lowerCamelCase , )
if training_args.do_train:
if data_args.max_train_samples is not None:
_lowerCAmelCase : Any = min(len(_lowerCamelCase ) , data_args.max_train_samples )
_lowerCAmelCase : Optional[Any] = train_dataset.select(range(_lowerCamelCase ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
_lowerCAmelCase : Dict = train_dataset.map(
_lowerCamelCase , batched=_lowerCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on train dataset" , )
# Log a few random samples from the training set:
for index in random.sample(range(len(_lowerCamelCase ) ) , 3 ):
logger.info(F"Sample {index} of the training set: {train_dataset[index]}." )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_lowerCAmelCase : Union[str, Any] = min(len(_lowerCamelCase ) , data_args.max_eval_samples )
_lowerCAmelCase : Union[str, Any] = eval_dataset.select(range(_lowerCamelCase ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
_lowerCAmelCase : Tuple = eval_dataset.map(
_lowerCamelCase , batched=_lowerCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on validation dataset" , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
_lowerCAmelCase : List[Any] = min(len(_lowerCamelCase ) , data_args.max_predict_samples )
_lowerCAmelCase : List[str] = predict_dataset.select(range(_lowerCamelCase ) )
with training_args.main_process_first(desc="prediction dataset map pre-processing" ):
_lowerCAmelCase : Optional[int] = predict_dataset.map(
_lowerCamelCase , batched=_lowerCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on prediction dataset" , )
# Get the metric function
_lowerCAmelCase : List[str] = evaluate.load("xnli" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = p.predictions[0] if isinstance(p.predictions , _lowerCamelCase ) else p.predictions
_lowerCAmelCase : Dict = np.argmax(_lowerCamelCase , axis=1 )
return metric.compute(predictions=_lowerCamelCase , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_lowerCAmelCase : int = default_data_collator
elif training_args.fpaa:
_lowerCAmelCase : List[str] = DataCollatorWithPadding(_lowerCamelCase , pad_to_multiple_of=8 )
else:
_lowerCAmelCase : Union[str, Any] = None
# Initialize our Trainer
_lowerCAmelCase : Dict = Trainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_lowerCamelCase , tokenizer=_lowerCamelCase , data_collator=_lowerCamelCase , )
# Training
if training_args.do_train:
_lowerCAmelCase : Union[str, Any] = None
if training_args.resume_from_checkpoint is not None:
_lowerCAmelCase : List[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowerCAmelCase : int = last_checkpoint
_lowerCAmelCase : Any = trainer.train(resume_from_checkpoint=_lowerCamelCase )
_lowerCAmelCase : List[str] = train_result.metrics
_lowerCAmelCase : Dict = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowerCamelCase )
)
_lowerCAmelCase : Tuple = min(_lowerCamelCase , len(_lowerCamelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train" , _lowerCamelCase )
trainer.save_metrics("train" , _lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_lowerCAmelCase : Optional[int] = trainer.evaluate(eval_dataset=_lowerCamelCase )
_lowerCAmelCase : Dict = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = min(_lowerCamelCase , len(_lowerCamelCase ) )
trainer.log_metrics("eval" , _lowerCamelCase )
trainer.save_metrics("eval" , _lowerCamelCase )
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***" )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = trainer.predict(_lowerCamelCase , metric_key_prefix="predict" )
_lowerCAmelCase : str = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(_lowerCamelCase )
)
_lowerCAmelCase : int = min(_lowerCamelCase , len(_lowerCamelCase ) )
trainer.log_metrics("predict" , _lowerCamelCase )
trainer.save_metrics("predict" , _lowerCamelCase )
_lowerCAmelCase : str = np.argmax(_lowerCamelCase , axis=1 )
_lowerCAmelCase : Any = os.path.join(training_args.output_dir , "predictions.txt" )
if trainer.is_world_process_zero():
with open(_lowerCamelCase , "w" ) as writer:
writer.write("index\tprediction\n" )
for index, item in enumerate(_lowerCamelCase ):
_lowerCAmelCase : str = label_list[item]
writer.write(F"{index}\t{item}\n" )
if __name__ == "__main__":
main()
| 36 |
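The three dataclasses above reach the command line through `HfArgumentParser`. A minimal sketch of that mapping with an explicit, hypothetical argv, assuming it runs inside the module so `ModelArguments` and `DataTrainingArguments` are in scope:

from transformers import HfArgumentParser, TrainingArguments

parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses(
    args=[
        "--model_name_or_path", "bert-base-multilingual-cased",  # hypothetical choices
        "--language", "de",
        "--train_language", "en",
        "--output_dir", "/tmp/debug_xnli",
        "--do_train",
    ]
)
print(model_args.language, data_args.max_seq_length)  # de 128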
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCAmelCase_ = 16
UpperCAmelCase_ = 32
def lowerCamelCase__ ( A__ : Accelerator , A__ : int = 16 , A__ : str = "bert-base-cased" ):
'''simple docstring'''
__lowerCamelCase = AutoTokenizer.from_pretrained(A__ )
__lowerCamelCase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(A__ : int ):
# max_length=None => use the model max length (it's actually the default)
__lowerCamelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=A__ , max_length=A__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__lowerCamelCase = datasets.map(
A__ , batched=A__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=A__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowerCamelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(A__ : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(A__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
__lowerCamelCase = DataLoader(
tokenized_datasets["""train"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
__lowerCamelCase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
return train_dataloader, eval_dataloader
def lowerCamelCase__ ( A__ : Tuple , A__ : Union[str, Any] , A__ : Tuple , A__ : Optional[Any] ):
'''simple docstring'''
model.eval()
__lowerCamelCase = 0
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowerCamelCase = model(**A__ )
__lowerCamelCase = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__lowerCamelCase, __lowerCamelCase = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A__ ) - 1:
__lowerCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__lowerCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A__ , references=A__ , )
__lowerCamelCase = metric.compute()
return eval_metric["accuracy"]
def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Optional[int] ):
'''simple docstring'''
__lowerCamelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCamelCase = config["""lr"""]
__lowerCamelCase = int(config["""num_epochs"""] )
__lowerCamelCase = int(config["""seed"""] )
__lowerCamelCase = int(config["""batch_size"""] )
__lowerCamelCase = args.model_name_or_path
set_seed(A__ )
__lowerCamelCase, __lowerCamelCase = get_dataloaders(A__ , A__ , A__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(A__ , return_dict=A__ )
# Instantiate optimizer
__lowerCamelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__lowerCamelCase = optimizer_cls(params=model.parameters() , lr=A__ )
if accelerator.state.deepspeed_plugin is not None:
__lowerCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
__lowerCamelCase = 1
__lowerCamelCase = (len(A__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__lowerCamelCase = get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=0 , num_training_steps=A__ , )
else:
__lowerCamelCase = DummyScheduler(A__ , total_num_steps=A__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# We need to keep track of how many total steps we have iterated over
__lowerCamelCase = 0
# We also need to keep track of the stating epoch so files are named properly
__lowerCamelCase = 0
__lowerCamelCase = evaluate.load("""glue""" , """mrpc""" )
__lowerCamelCase = num_epochs
if args.partial_train_epoch is not None:
__lowerCamelCase = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__lowerCamelCase = args.resume_from_checkpoint.split("""epoch_""" )[1]
__lowerCamelCase = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__lowerCamelCase = int(A__ ) + 1
__lowerCamelCase = evaluation_loop(A__ , A__ , A__ , A__ )
accelerator.print("""resumed checkpoint performance:""" , A__ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , """r""" ) as f:
__lowerCamelCase = json.load(A__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__lowerCamelCase = {}
for epoch in range(A__ , A__ ):
model.train()
for step, batch in enumerate(A__ ):
__lowerCamelCase = model(**A__ )
__lowerCamelCase = outputs.loss
__lowerCamelCase = loss / gradient_accumulation_steps
accelerator.backward(A__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__lowerCamelCase = f'epoch_{epoch}'
__lowerCamelCase = os.path.join(args.output_dir , A__ )
accelerator.save_state(A__ )
__lowerCamelCase = evaluation_loop(A__ , A__ , A__ , A__ )
__lowerCamelCase = accuracy
__lowerCamelCase = lr_scheduler.get_lr()[0]
__lowerCamelCase = optimizer.param_groups[0]["""lr"""]
__lowerCamelCase = epoch
__lowerCamelCase = overall_step
accelerator.print(f'epoch {epoch}:' , A__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , """w""" ) as f:
json.dump(A__ , A__ )
def lowerCamelCase__ ( ):
'''simple docstring'''
    __lowerCamelCase = argparse.ArgumentParser(description="""Simple example of a training script with checkpointing and resumption.""" )
parser.add_argument(
"""--model_name_or_path""" , type=A__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=A__ , )
parser.add_argument(
"""--output_dir""" , type=A__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=A__ , default=A__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=A__ , default=A__ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=A__ , default=2 , help="""Number of train epochs.""" , )
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(A__ , A__ )
if __name__ == "__main__":
main()
| 12 | 0 |
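The heart of the script above is the `save_state` / `load_state` round trip, which writes and restores model weights, optimizer state, and RNG state per epoch. A minimal hedged sketch of that mechanism in isolation (the checkpoint path is arbitrary):

import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
model, optimizer = accelerator.prepare(model, optimizer)

accelerator.save_state("/tmp/epoch_0")  # writes model, optimizer and RNG states
accelerator.load_state("/tmp/epoch_0")  # restores them when resuming training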
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
_lowerCAmelCase = logging.get_logger(__name__)
@dataclass
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Any = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__( self ,**__UpperCAmelCase ) -> int:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
lowerCAmelCase__ : str = deprecated_arg[3:]
lowerCAmelCase__ : Union[str, Any] = not kwargs.pop(__UpperCAmelCase )
logger.warning(
F"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"""
F""" {positive_arg}={kwargs[positive_arg]}""" )
lowerCAmelCase__ : Union[str, Any] = kwargs.pop("""tpu_name""" ,self.tpu_name )
lowerCAmelCase__ : str = kwargs.pop("""device_idx""" ,self.device_idx )
lowerCAmelCase__ : int = kwargs.pop("""eager_mode""" ,self.eager_mode )
lowerCAmelCase__ : int = kwargs.pop("""use_xla""" ,self.use_xla )
super().__init__(**__UpperCAmelCase )
__lowercase : str = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''Name of TPU'''} , )
__lowercase : int = field(
default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
    __lowercase : bool = field(default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''Benchmark models in eager mode.'''} )
    __lowercase : bool = field(
        default=SCREAMING_SNAKE_CASE_ , metadata={
            '''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`.'''
        } , )
@cached_property
def UpperCAmelCase_ ( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self ,["""tf"""] )
lowerCAmelCase__ : Any = None
if self.tpu:
try:
if self.tpu_name:
lowerCAmelCase__ : List[Any] = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
lowerCAmelCase__ : Any = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
lowerCAmelCase__ : Optional[Any] = None
return tpu
@cached_property
def UpperCAmelCase_ ( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self ,["""tf"""] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
lowerCAmelCase__ : Any = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] ,"""GPU""" )
lowerCAmelCase__ : str = tf.distribute.OneDeviceStrategy(device=F"""/gpu:{self.device_idx}""" )
else:
tf.config.set_visible_devices([] ,"""GPU""" ) # disable GPU
lowerCAmelCase__ : str = tf.distribute.OneDeviceStrategy(device=F"""/cpu:{self.device_idx}""" )
return strategy
@property
def UpperCAmelCase_ ( self ) -> bool:
requires_backends(self ,["""tf"""] )
return self._setup_tpu is not None
@property
def UpperCAmelCase_ ( self ) -> "tf.distribute.Strategy":
requires_backends(self ,["""tf"""] )
return self._setup_strategy
@property
def UpperCAmelCase_ ( self ) -> str:
requires_backends(self ,["""tf"""] )
return tf.config.list_physical_devices("""GPU""" )
@property
def UpperCAmelCase_ ( self ) -> int:
requires_backends(self ,["""tf"""] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def UpperCAmelCase_ ( self ) -> bool:
return self.n_gpu > 0
| 37 |
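The `__init__` above translates legacy `no_*` flags into their positive counterparts before delegating to the parent dataclass. The same idiom in isolation, with a hypothetical subset of flag names:

deprecated_args = ["no_inference", "no_cuda"]  # hypothetical subset

def translate(**kwargs):
    for deprecated_arg in deprecated_args:
        if deprecated_arg in kwargs:
            positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
            kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
    return kwargs

print(translate(no_cuda=True, inference=True))  # {'inference': True, 'cuda': False}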
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCAmelCase_ = get_tests_dir('fixtures')
UpperCAmelCase_ = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
UpperCAmelCase_ = get_tests_dir('fixtures/dummy-config.json')
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = 0
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: int ):
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load the feature extractor locally
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ ).to_dict()
config_dict.pop("""feature_extractor_type""" )
__lowerCamelCase = WavaVecaFeatureExtractor(**UpperCamelCase_ )
# save in new folder
model_config.save_pretrained(UpperCamelCase_ )
config.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ )
# make sure private variable is not incorrectly saved
__lowerCamelCase = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: int ):
with self.assertRaisesRegex(
UpperCamelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def lowerCAmelCase__ ( self: Tuple ):
with self.assertRaisesRegex(
UpperCamelCase_ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ , revision="""aaaaaa""" )
def lowerCAmelCase__ ( self: Optional[Any] ):
with self.assertRaisesRegex(
UpperCamelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCAmelCase__ ( self: Tuple ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def lowerCAmelCase__ ( self: Any ):
try:
AutoConfig.register("""custom""" , UpperCamelCase_ )
AutoFeatureExtractor.register(UpperCamelCase_ , UpperCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoFeatureExtractor.register(UpperCamelCase_ , UpperCamelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
__lowerCamelCase = CustomFeatureExtractor.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase__ ( self: Dict ):
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : str = True
try:
AutoConfig.register("""custom""" , UpperCamelCase_ )
AutoFeatureExtractor.register(UpperCamelCase_ , UpperCamelCase_ )
# If remote code is not set, the default is to use local
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(UpperCamelCase_ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 12 | 0 |
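The registration tests above exercise the public `register` hooks. A hedged sketch of pairing a custom config with a custom feature extractor through the Auto API (the class names and model type are made up):

from transformers import AutoConfig, AutoFeatureExtractor, PretrainedConfig
from transformers.feature_extraction_utils import FeatureExtractionMixin

class MyConfig(PretrainedConfig):
    model_type = "my-model"  # hypothetical model type

class MyFeatureExtractor(FeatureExtractionMixin):
    pass

AutoConfig.register("my-model", MyConfig)
AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)
# AutoFeatureExtractor.from_pretrained(...) now resolves MyConfig to MyFeatureExtractor.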
from __future__ import annotations
import queue
class _SCREAMING_SNAKE_CASE :
def __init__( self : int , __lowerCamelCase : Union[str, Any] ):
UpperCamelCase :Optional[Any] = data
UpperCamelCase :Dict = None
UpperCamelCase :List[Any] = None
def SCREAMING_SNAKE_CASE_ ( ) -> TreeNode:
"""simple docstring"""
print("""\n********Press N to stop entering at any point of time********\n""" )
UpperCamelCase :List[str] = input("""Enter the value of the root node: """ ).strip().lower()
UpperCamelCase :queue.Queue = queue.Queue()
UpperCamelCase :int = TreeNode(int(__magic_name__ ) )
q.put(__magic_name__ )
while not q.empty():
UpperCamelCase :Any = q.get()
UpperCamelCase :Union[str, Any] = f"""Enter the left node of {node_found.data}: """
UpperCamelCase :Tuple = input(__magic_name__ ).strip().lower() or """n"""
if check == "n":
return tree_node
UpperCamelCase :int = TreeNode(int(__magic_name__ ) )
UpperCamelCase :List[Any] = left_node
q.put(__magic_name__ )
UpperCamelCase :Optional[Any] = f"""Enter the right node of {node_found.data}: """
UpperCamelCase :Any = input(__magic_name__ ).strip().lower() or """n"""
if check == "n":
return tree_node
UpperCamelCase :Union[str, Any] = TreeNode(int(__magic_name__ ) )
UpperCamelCase :Any = right_node
q.put(__magic_name__ )
raise
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(__magic_name__ , __magic_name__ ) or not node:
return
print(node.data , end=""",""" )
pre_order(node.left )
pre_order(node.right )
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(__magic_name__ , __magic_name__ ) or not node:
return
in_order(node.left )
print(node.data , end=""",""" )
in_order(node.right )
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(__magic_name__ , __magic_name__ ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=""",""" )
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(__magic_name__ , __magic_name__ ) or not node:
return
UpperCamelCase :queue.Queue = queue.Queue()
q.put(__magic_name__ )
while not q.empty():
UpperCamelCase :Optional[Any] = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(__magic_name__ , __magic_name__ ) or not node:
return
UpperCamelCase :queue.Queue = queue.Queue()
q.put(__magic_name__ )
while not q.empty():
UpperCamelCase :str = []
while not q.empty():
UpperCamelCase :List[Any] = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(__magic_name__ )
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(__magic_name__ , __magic_name__ ) or not node:
return
UpperCamelCase :list[TreeNode] = []
UpperCamelCase :List[Any] = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=""",""" )
stack.append(__magic_name__ )
UpperCamelCase :Optional[int] = n.left
# end of while means current node doesn't have left child
UpperCamelCase :Dict = stack.pop()
# start to traverse its right child
UpperCamelCase :Union[str, Any] = n.right
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(__magic_name__ , __magic_name__ ) or not node:
return
UpperCamelCase :list[TreeNode] = []
UpperCamelCase :List[Any] = node
while n or stack:
while n:
stack.append(__magic_name__ )
UpperCamelCase :str = n.left
UpperCamelCase :List[str] = stack.pop()
print(n.data , end=""",""" )
UpperCamelCase :str = n.right
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(__magic_name__ , __magic_name__ ) or not node:
return
UpperCamelCase , UpperCamelCase :str = [], []
UpperCamelCase :Any = node
stacka.append(__magic_name__ )
while stacka: # to find the reversed order of post order, store it in stack2
UpperCamelCase :Optional[Any] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(__magic_name__ )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=""",""" )
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : str = "" , __magic_name__ : Any=50 , __magic_name__ : Optional[Any]="*" ) -> str:
"""simple docstring"""
if not s:
return "\n" + width * char
UpperCamelCase , UpperCamelCase :Union[str, Any] = divmod(width - len(__magic_name__ ) - 2 , 2 )
return f"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('''Binary Tree Traversals'''))
UpperCAmelCase_ : TreeNode = build_tree()
print(prompt('''Pre Order Traversal'''))
pre_order(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal'''))
in_order(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal'''))
post_order(node)
print(prompt() + '''\n''')
print(prompt('''Level Order Traversal'''))
level_order(node)
print(prompt() + '''\n''')
print(prompt('''Actual Level Order Traversal'''))
level_order_actual(node)
print('''*''' * 50 + '''\n''')
print(prompt('''Pre Order Traversal - Iteration Version'''))
pre_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal - Iteration Version'''))
in_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal - Iteration Version'''))
post_order_iter(node)
print(prompt())
| 38 |
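For a quick non-interactive check of the traversals above, a tree can also be wired up by hand, bypassing `build_tree`'s input prompts (using the original names that the call sites above use):

# Manual three-node tree:   1
#                          / \
#                         2   3
root = TreeNode(1)
root.left, root.right = TreeNode(2), TreeNode(3)

pre_order(root)   # prints: 1,2,3,
print()
in_order(root)    # prints: 2,1,3,
print()
post_order(root)  # prints: 2,3,1,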
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
UpperCAmelCase_ = get_logger(__name__)
class lowerCamelCase__:
UpperCAmelCase__ : List[Any] = 'dummy_data'
UpperCAmelCase__ : str = 'datasets'
UpperCAmelCase__ : Tuple = False
def __init__( self: Optional[Any] , UpperCamelCase_: str , UpperCamelCase_: str , UpperCamelCase_: Union[Version, str] , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: bool = False , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[List[Callable]] = None , ):
__lowerCamelCase = 0
__lowerCamelCase = dataset_name
__lowerCamelCase = cache_dir
__lowerCamelCase = use_local_dummy_data
__lowerCamelCase = config
# download_callbacks take a single url as input
__lowerCamelCase = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
__lowerCamelCase = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
__lowerCamelCase = str(UpperCamelCase_ )
# to be downloaded
__lowerCamelCase = None
__lowerCamelCase = None
@property
def lowerCAmelCase__ ( self: List[Any] ):
if self._dummy_file is None:
__lowerCamelCase = self.download_dummy_data()
return self._dummy_file
@property
def lowerCAmelCase__ ( self: str ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def lowerCAmelCase__ ( self: Optional[Any] ):
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
__lowerCamelCase = cached_path(
UpperCamelCase_ , cache_dir=self.cache_dir , extract_compressed_file=UpperCamelCase_ , force_extract=UpperCamelCase_ )
return os.path.join(UpperCamelCase_ , self.dummy_file_name )
@property
def lowerCAmelCase__ ( self: Optional[Any] ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def lowerCAmelCase__ ( self: Tuple ):
if self._bucket_url is None:
__lowerCamelCase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def lowerCAmelCase__ ( self: str ):
        # return full path if it's a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Dict , *UpperCamelCase_: str ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
__lowerCamelCase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
__lowerCamelCase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
return self.create_dummy_data_dict(UpperCamelCase_ , UpperCamelCase_ )
elif isinstance(UpperCamelCase_ , (list, tuple) ):
return self.create_dummy_data_list(UpperCamelCase_ , UpperCamelCase_ )
else:
return self.create_dummy_data_single(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Optional[Any] , *UpperCamelCase_: str ):
return self.download_and_extract(UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Tuple , UpperCamelCase_: str ):
return self.download_and_extract(UpperCamelCase_ )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: int , *UpperCamelCase_: List[str] , **UpperCamelCase_: str ):
return path
def lowerCAmelCase__ ( self: Dict ):
return {}
def lowerCAmelCase__ ( self: str , UpperCamelCase_: List[Any] , UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
for single_url in single_urls:
download_callback(UpperCamelCase_ )
else:
__lowerCamelCase = single_urls
download_callback(UpperCamelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = [os.path.join(UpperCamelCase_ , urllib.parse.quote_plus(Path(UpperCamelCase_ ).name ) ) for x in single_urls]
else:
__lowerCamelCase = single_urls
__lowerCamelCase = os.path.join(UpperCamelCase_ , urllib.parse.quote_plus(Path(UpperCamelCase_ ).name ) )
__lowerCamelCase = value
# make sure that values are unique
if all(isinstance(UpperCamelCase_ , UpperCamelCase_ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
__lowerCamelCase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[int] ):
__lowerCamelCase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
__lowerCamelCase = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , UpperCamelCase_ ) ) for url in data_url )
__lowerCamelCase = all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
__lowerCamelCase = [data_url[0]] * len(UpperCamelCase_ )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(UpperCamelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowerCamelCase = os.path.join(UpperCamelCase_ , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(UpperCamelCase_ )
return dummy_data_list
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any] ):
for download_callback in self.download_callbacks:
download_callback(UpperCamelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowerCamelCase = os.path.join(UpperCamelCase_ , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(UpperCamelCase_ ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def lowerCAmelCase__ ( self: Optional[Any] ):
pass
def lowerCAmelCase__ ( self: List[Any] ):
pass
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Dict ):
def _iter_archive_members(UpperCamelCase_: Any ):
# this preserves the order of the members inside the ZIP archive
__lowerCamelCase = Path(self.dummy_file ).parent
__lowerCamelCase = path.relative_to(UpperCamelCase_ )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
__lowerCamelCase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(UpperCamelCase_ )
__lowerCamelCase = Path(UpperCamelCase_ )
__lowerCamelCase = _iter_archive_members(UpperCamelCase_ ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(UpperCamelCase_ ).as_posix(), file_path.open("""rb""" )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Dict ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = [paths]
for path in paths:
if os.path.isfile(UpperCamelCase_ ):
if os.path.basename(UpperCamelCase_ ).startswith((""".""", """__""") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(UpperCamelCase_ ):
if os.path.basename(UpperCamelCase_ ).startswith((""".""", """__""") ):
continue
dirnames.sort()
for filename in sorted(UpperCamelCase_ ):
if filename.startswith((""".""", """__""") ):
continue
yield os.path.join(UpperCamelCase_ , UpperCamelCase_ )
| 12 | 0 |
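A side note on the dummy-data manager above: every real download URL is mapped to a file inside dummy_data.zip by keeping only the last component of the URL path, encoded with urllib.parse.quote_plus so that query arguments stay in the file name. A minimal sketch of that mapping (the helper name dummy_path_for_url is mine, not part of the library):
import os
import urllib.parse
from pathlib import Path


def dummy_path_for_url(dummy_root: str, url: str) -> str:
    # Keep only the last file/folder name of the URL path; quote any query arguments.
    return os.path.join(dummy_root, urllib.parse.quote_plus(Path(url).name))


print(dummy_path_for_url("dummy/1.0.0/dummy_data", "https://host/data/train.csv?raw=1"))
# -> dummy/1.0.0/dummy_data/train.csv%3Fraw%3D1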
def solution(n: int = 1_000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    fa, fb = 1, 1
    index = 2
    while True:
        i = 0
        f = fa + fb
        fa, fb = fb, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 39 |
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Place one queen per row, backtracking on any collision."""
    row = len(possible_board)
    # If row equals the size of the board, the current board (possible_board)
    # already has a queen in every row.
    if row == n:
        # Convert possible_board, e.g. [1, 3, 0, 2], to printable rows:
        # ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return
    # Try every column of the current row.
    for col in range(n):
        # A column already in possible_board means a vertical collision. The two
        # diagonal checks use the line equations:
        #
        #   45º:  row - col = b
        #  135º:  row + col = b
        #
        # If either value was seen before (diagonal_right_collisions,
        # diagonal_left_collisions), this square is attacked, so skip it.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        # Otherwise recurse with the updated partial solution.
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")
    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 12 | 0 |
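As a cross-check of the Fibonacci digit-count solution above, the first index whose term reaches n digits can also be found without big-integer arithmetic via Binet's approximation, log10(F(k)) ≈ k·log10(φ) − log10(√5). A sketch (the function name solution_closed_form is mine):
from math import log10, sqrt


def solution_closed_form(n: int = 1_000) -> int:
    # Smallest k with k * log10(phi) - log10(sqrt(5)) >= n - 1.
    phi = (1 + sqrt(5)) / 2
    k = 2
    while k * log10(phi) - log10(sqrt(5)) < n - 1:
        k += 1
    return k


assert solution_closed_form(3) == 12  # F(12) = 144 is the first 3-digit term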
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _A ( _a ):
"""simple docstring"""
UpperCAmelCase : int = """sew-d"""
def __init__( self : Optional[int] , __UpperCAmelCase : Any=32 , __UpperCAmelCase : Union[str, Any]=768 , __UpperCAmelCase : Tuple=12 , __UpperCAmelCase : Dict=12 , __UpperCAmelCase : str=3072 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : Optional[int]=512 , __UpperCAmelCase : int=256 , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : int=True , __UpperCAmelCase : List[Any]=("p2c", "c2p") , __UpperCAmelCase : Any="layer_norm" , __UpperCAmelCase : Optional[Any]="gelu_python" , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : Dict=0.0 , __UpperCAmelCase : str=0.1 , __UpperCAmelCase : Dict=0.02 , __UpperCAmelCase : List[Any]=1e-7 , __UpperCAmelCase : Dict=1e-5 , __UpperCAmelCase : Dict="group" , __UpperCAmelCase : Optional[Any]="gelu" , __UpperCAmelCase : Optional[Any]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __UpperCAmelCase : Dict=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __UpperCAmelCase : Optional[int]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __UpperCAmelCase : Union[str, Any]=False , __UpperCAmelCase : Union[str, Any]=128 , __UpperCAmelCase : Any=16 , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : int=0.05 , __UpperCAmelCase : Any=10 , __UpperCAmelCase : Any=2 , __UpperCAmelCase : int=0.0 , __UpperCAmelCase : Optional[Any]=10 , __UpperCAmelCase : Optional[Any]=0 , __UpperCAmelCase : Optional[Any]="mean" , __UpperCAmelCase : Dict=False , __UpperCAmelCase : Dict=False , __UpperCAmelCase : List[Any]=256 , __UpperCAmelCase : Optional[Any]=0 , __UpperCAmelCase : List[str]=1 , __UpperCAmelCase : List[Any]=2 , **__UpperCAmelCase : str , ):
super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase)
a : Tuple = hidden_size
a : Optional[Any] = feat_extract_norm
a : List[str] = feat_extract_activation
a : str = list(__UpperCAmelCase)
a : Dict = list(__UpperCAmelCase)
a : str = list(__UpperCAmelCase)
a : int = conv_bias
a : Optional[int] = num_conv_pos_embeddings
a : Union[str, Any] = num_conv_pos_embedding_groups
a : str = len(self.conv_dim)
a : int = num_hidden_layers
a : Dict = intermediate_size
a : Optional[Any] = squeeze_factor
a : Union[str, Any] = max_position_embeddings
a : Any = position_buckets
a : Tuple = share_att_key
a : str = relative_attention
a : Tuple = norm_rel_ebd
a : Tuple = list(__UpperCAmelCase)
a : Tuple = hidden_act
a : List[str] = num_attention_heads
a : Optional[int] = hidden_dropout
a : int = attention_dropout
a : Dict = activation_dropout
a : str = feat_proj_dropout
a : Any = final_dropout
a : Optional[Any] = layer_norm_eps
a : List[Any] = feature_layer_norm_eps
a : str = initializer_range
a : Dict = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
f'''but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)'''
f'''= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a : str = apply_spec_augment
a : int = mask_time_prob
a : Union[str, Any] = mask_time_length
a : Any = mask_time_min_masks
a : Tuple = mask_feature_prob
a : List[Any] = mask_feature_length
a : Optional[Any] = mask_feature_min_masks
# ctc loss
a : Tuple = ctc_loss_reduction
a : str = ctc_zero_infinity
# sequence classification
a : int = use_weighted_layer_sum
a : Any = classifier_proj_size
@property
def __snake_case ( self : Optional[Any]):
return functools.reduce(operator.mul , self.conv_stride , 1)
| 40 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves to leave exactly one coin on every node."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The number of nodes should be the same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12 | 0 |
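A minimal usage sketch for the coin-distribution routine above, built on the TreeNode dataclass it defines: a root holding three coins with two empty children needs exactly two moves, one per child.
root = TreeNode(3, TreeNode(0), TreeNode(0))
assert distribute_coins(root) == 2  # one coin moves down each edge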
'''simple docstring'''
from __future__ import annotations
def comp_and_swap(array: list[int], index_a: int, index_b: int, direction: int) -> None:
    """Swap array[index_a] and array[index_b] if they violate the given direction."""
    if (direction == 1 and array[index_a] > array[index_b]) or (
        direction == 0 and array[index_a] < array[index_b]
    ):
        array[index_a], array[index_b] = array[index_b], array[index_a]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively sort a bitonic sequence in the given direction."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort array[low:low + length] in place; length must be a power of two."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('''\nSorted array in ascending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('''Sorted array in descending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
| 41 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = ['pixel_values']
def __init__( self: Any , UpperCamelCase_: bool = True , UpperCamelCase_: Union[int, float] = 1 / 2_55 , UpperCamelCase_: bool = True , UpperCamelCase_: int = 8 , **UpperCamelCase_: Tuple , ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = do_rescale
__lowerCamelCase = rescale_factor
__lowerCamelCase = do_pad
__lowerCamelCase = pad_size
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: np.ndarray , UpperCamelCase_: float , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Tuple ):
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: np.ndarray , UpperCamelCase_: int , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None ):
__lowerCamelCase, __lowerCamelCase = get_image_size(UpperCamelCase_ )
__lowerCamelCase = (old_height // size + 1) * size - old_height
__lowerCamelCase = (old_width // size + 1) * size - old_width
return pad(UpperCamelCase_ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=UpperCamelCase_ )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: ImageInput , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[float] = None , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[Union[str, TensorType]] = None , UpperCamelCase_: Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase_: Any , ):
__lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCamelCase = do_pad if do_pad is not None else self.do_pad
__lowerCamelCase = pad_size if pad_size is not None else self.pad_size
__lowerCamelCase = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
__lowerCamelCase = [to_numpy_array(UpperCamelCase_ ) for image in images]
if do_rescale:
__lowerCamelCase = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images]
if do_pad:
__lowerCamelCase = [self.pad(UpperCamelCase_ , size=UpperCamelCase_ ) for image in images]
__lowerCamelCase = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
__lowerCamelCase = {"""pixel_values""": images}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
| 12 | 0 |
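The bitonic sort above only works when the slice length is a power of two, because every merge halves the range. A small sanity check:
data = [12, 42, -21, 1, 99, 7, 0, 3]  # length 8 == 2**3
bitonic_sort(data, 0, len(data), 1)
assert data == sorted(data)
bitonic_merge(data, 0, len(data), 0)  # a sorted list is bitonic, so this reverses it
assert data == sorted(data, reverse=True)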
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a second-order low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a second-order high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a second-order band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a second-order all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a second-order peak (bell) biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a second-order low-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a second-order high-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 42 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Divide-and-conquer maximum of nums[left:right + 1]."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 12 | 0 |
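A short sketch of how the biquad constructors above are meant to be used, assuming IIRFilter from the repository's audio_filters.iir_filter module exposes a per-sample process method:
filt = make_lowpass(frequency=1_000, samplerate=48_000)
impulse = [1.0] + [0.0] * 15
response = [filt.process(sample) for sample in impulse]
print(response[:4])  # leading taps of the low-pass impulse response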
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source: float, target: float) -> bool:
    """True when `source` is within 1% of `target`."""
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    datasets_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(datasets_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) , splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_351_563,
'''num_examples''': 10_000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238_418,
'''num_examples''': 1_000,
},
] , download_size=3_940_680 , dataset_size=2_589_981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
| 43 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowerCamelCase__( __lowerCamelCase):
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = SMALL_MODEL_IDENTIFIER
__lowerCamelCase = """pt"""
__lowerCamelCase = """tf"""
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = TFAutoModel.from_pretrained(self.test_model , from_pt=UpperCamelCase_ )
model_tf.save_pretrained(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = """mock_framework"""
# Framework provided - return whatever the user provides
__lowerCamelCase = FeaturesManager.determine_framework(self.test_model , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(UpperCamelCase_ )
__lowerCamelCase = FeaturesManager.determine_framework(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(UpperCamelCase_ )
__lowerCamelCase = FeaturesManager.determine_framework(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: int ):
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(UpperCamelCase_ )
__lowerCamelCase = FeaturesManager.determine_framework(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(UpperCamelCase_ )
__lowerCamelCase = FeaturesManager.determine_framework(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = FeaturesManager.determine_framework(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = MagicMock(return_value=UpperCamelCase_ )
with patch("""transformers.onnx.features.is_tf_available""" , UpperCamelCase_ ):
__lowerCamelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCamelCase_ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
__lowerCamelCase = MagicMock(return_value=UpperCamelCase_ )
with patch("""transformers.onnx.features.is_torch_available""" , UpperCamelCase_ ):
__lowerCamelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCamelCase_ , self.framework_tf )
# Both in environment -> use PyTorch
__lowerCamelCase = MagicMock(return_value=UpperCamelCase_ )
__lowerCamelCase = MagicMock(return_value=UpperCamelCase_ )
with patch("""transformers.onnx.features.is_tf_available""" , UpperCamelCase_ ), patch(
"""transformers.onnx.features.is_torch_available""" , UpperCamelCase_ ):
__lowerCamelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCamelCase_ , self.framework_pt )
# Both not in environment -> raise error
__lowerCamelCase = MagicMock(return_value=UpperCamelCase_ )
__lowerCamelCase = MagicMock(return_value=UpperCamelCase_ )
with patch("""transformers.onnx.features.is_tf_available""" , UpperCamelCase_ ), patch(
"""transformers.onnx.features.is_torch_available""" , UpperCamelCase_ ):
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = FeaturesManager.determine_framework(self.test_model )
| 12 | 0 |
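The tests above exercise FeaturesManager.determine_framework, which resolves a checkpoint directory to "pt" or "tf". A rough sketch of the local-directory branch of that logic, simplified and not the transformers implementation (the standard weight file names pytorch_model.bin and tf_model.h5 are what the real code looks for):
import os


def guess_framework(model_dir: str) -> str:
    # Prefer PyTorch when both checkpoints are present, mirroring the tests above.
    if os.path.isfile(os.path.join(model_dir, "pytorch_model.bin")):
        return "pt"
    if os.path.isfile(os.path.join(model_dir, "tf_model.h5")):
        return "tf"
    raise FileNotFoundError(f"Cannot determine framework from {model_dir}")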
"""simple docstring"""
def solution(length: int = 50) -> int:
    """Count the tilings of a row of `length` units (Project Euler 114)."""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 44 |
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """Compute the next generation of the Game of Life grid."""
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """Render `frames` successive generations as PIL images."""
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
| 12 | 0 |
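A quick property check for the new_generation step above: the 3x3 blinker flips between a vertical and a horizontal bar and returns to its start after two steps.
assert new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
assert new_generation(new_generation(BLINKER)) == BLINKER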
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137
def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Distance in metres between two points on Earth, using the haversine formula
    on latitudes corrected for the WGS84 flattening."""
    # Constants per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Union[str, Any] = StableDiffusionInpaintPipeline
UpperCAmelCase__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
UpperCAmelCase__ : str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase__ : int = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCAmelCase__ : Union[str, Any] = frozenset([])
def lowerCAmelCase__ ( self: str ):
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase_ , )
__lowerCamelCase = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
torch.manual_seed(0 )
__lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
__lowerCamelCase = CLIPTextModel(UpperCamelCase_ )
__lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCamelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCamelCase = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert("""RGB""" ).resize((64, 64) )
__lowerCamelCase = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionInpaintPipeline(**UpperCamelCase_ )
__lowerCamelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
__lowerCamelCase = sd_pipe(**UpperCamelCase_ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self: int ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase_ , safety_checker=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type="""np""" , )
__lowerCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
UpperCamelCase_ , torch_dtype=torch.floataa , safety_checker=UpperCamelCase_ , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type="""np""" , )
__lowerCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def lowerCAmelCase__ ( self: int ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = PNDMScheduler.from_pretrained(UpperCamelCase_ , subfolder="""scheduler""" )
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
UpperCamelCase_ , safety_checker=UpperCamelCase_ , scheduler=UpperCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=2 , output_type="""np""" , )
__lowerCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 12 | 0 |
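A usage sketch for the flattening-corrected haversine_distance above; San Francisco to New York comes out near the commonly quoted great-circle figure of roughly 4,130 km (the coordinates are illustrative):
SAN_FRANCISCO = (37.774856, -122.424227)
NEW_YORK = (40.713019, -74.012647)
distance_m = haversine_distance(*SAN_FRANCISCO, *NEW_YORK)
print(f"{distance_m / 1000:.0f} km")  # ~4130 km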
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
| 46 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 12 | 0 |
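The module above exists only as a patch target: datasets' patch_submodule swaps an attribute such as os or open inside it and restores it afterwards. unittest.mock.patch.object shows the same idea in a minimal stand-in (the import path _test_patching is hypothetical; this is an illustration, not the datasets implementation):
from unittest.mock import patch

import _test_patching  # hypothetical name for the module above

with patch.object(_test_patching, "open", lambda *args, **kwargs: "mocked"):
    assert _test_patching.open() == "mocked"  # attribute restored when the block exits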
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: return the maximum value and the fraction taken of each item."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
| 47 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'tensor(bool)': np.bool_,
'tensor(int8)': np.inta,
'tensor(uint8)': np.uinta,
'tensor(int16)': np.intaa,
'tensor(uint16)': np.uintaa,
'tensor(int32)': np.intaa,
'tensor(uint32)': np.uintaa,
'tensor(int64)': np.intaa,
'tensor(uint64)': np.uintaa,
'tensor(float16)': np.floataa,
'tensor(float)': np.floataa,
'tensor(double)': np.floataa,
}
class lowerCamelCase__:
def __init__( self: str , UpperCamelCase_: List[Any]=None , **UpperCamelCase_: str ):
logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
__lowerCamelCase = model
__lowerCamelCase = kwargs.get("""model_save_dir""" , UpperCamelCase_ )
__lowerCamelCase = kwargs.get("""latest_model_name""" , UpperCamelCase_ )
def __call__( self: Dict , **UpperCamelCase_: Any ):
__lowerCamelCase = {k: np.array(UpperCamelCase_ ) for k, v in kwargs.items()}
return self.model.run(UpperCamelCase_ , UpperCamelCase_ )
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_: Union[str, Path] , UpperCamelCase_: Tuple=None , UpperCamelCase_: Tuple=None ):
if provider is None:
logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
__lowerCamelCase = """CPUExecutionProvider"""
return ort.InferenceSession(UpperCamelCase_ , providers=[provider] , sess_options=UpperCamelCase_ )
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Union[str, Path] , UpperCamelCase_: Optional[str] = None , **UpperCamelCase_: Optional[int] ):
__lowerCamelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
__lowerCamelCase = self.model_save_dir.joinpath(self.latest_model_name )
__lowerCamelCase = Path(UpperCamelCase_ ).joinpath(UpperCamelCase_ )
try:
shutil.copyfile(UpperCamelCase_ , UpperCamelCase_ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
__lowerCamelCase = self.model_save_dir.joinpath(UpperCamelCase_ )
if src_path.exists():
__lowerCamelCase = Path(UpperCamelCase_ ).joinpath(UpperCamelCase_ )
try:
shutil.copyfile(UpperCamelCase_ , UpperCamelCase_ )
except shutil.SameFileError:
pass
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Union[str, os.PathLike] , **UpperCamelCase_: Optional[Any] , ):
if os.path.isfile(UpperCamelCase_ ):
logger.error(F'Provided path ({save_directory}) should be a directory, not a file' )
return
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
# saving model weights/files
self._save_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: str , UpperCamelCase_: Union[str, Path] , UpperCamelCase_: Optional[Union[bool, str, None]] = None , UpperCamelCase_: Optional[Union[str, None]] = None , UpperCamelCase_: bool = False , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional["ort.SessionOptions"] = None , **UpperCamelCase_: int , ):
__lowerCamelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(UpperCamelCase_ ):
__lowerCamelCase = OnnxRuntimeModel.load_model(
os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , provider=UpperCamelCase_ , sess_options=UpperCamelCase_ )
__lowerCamelCase = Path(UpperCamelCase_ )
# load model from hub
else:
# download model
__lowerCamelCase = hf_hub_download(
repo_id=UpperCamelCase_ , filename=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , revision=UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , )
__lowerCamelCase = Path(UpperCamelCase_ ).parent
__lowerCamelCase = Path(UpperCamelCase_ ).name
__lowerCamelCase = OnnxRuntimeModel.load_model(UpperCamelCase_ , provider=UpperCamelCase_ , sess_options=UpperCamelCase_ )
return cls(model=UpperCamelCase_ , **UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: Optional[int] , UpperCamelCase_: Union[str, Path] , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional[str] = None , **UpperCamelCase_: int , ):
__lowerCamelCase = None
if len(str(UpperCamelCase_ ).split("""@""" ) ) == 2:
__lowerCamelCase, __lowerCamelCase = model_id.split("""@""" )
return cls._from_pretrained(
model_id=UpperCamelCase_ , revision=UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , **UpperCamelCase_ , )
| 12 | 0 |
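A worked example for the fractional knapsack above, using the classic 50-unit instance: the two densest items fit whole and two thirds of the last one tops up the sack.
value = [60, 100, 120]
weight = [10, 20, 30]
max_value, fractions = fractional_knapsack(value, weight, capacity=50)
assert max_value == 240
assert fractions == [1, 1, 2 / 3]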
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    """Numerically stable softmax over the last axis."""
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    """Classify a pair of texts with a sequence-classification model."""

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 48 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 12 | 0 |
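To use a custom pipeline like the PairClassificationPipeline above, transformers expects it to be registered first; a sketch following the library's documented pattern (the task name "pair-classification" is the user's choice):
from transformers import AutoModelForSequenceClassification, TFAutoModelForSequenceClassification
from transformers.pipelines import PIPELINE_REGISTRY

PIPELINE_REGISTRY.register_pipeline(
    "pair-classification",
    pipeline_class=PairClassificationPipeline,
    pt_model=AutoModelForSequenceClassification,
    tf_model=TFAutoModelForSequenceClassification,
)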
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 49 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
UpperCAmelCase_ = get_logger(__name__)
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : str , A__ : Any , A__ : Dict , A__ : Any=0 ):
'''simple docstring'''
os.makedirs(A__ , exist_ok=A__ )
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__lowerCamelCase = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__lowerCamelCase = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
__lowerCamelCase = os.path.join(A__ , A__ )
if accelerator.process_index == 0:
logger.info(f'Saving model to {output_model_file}' )
torch.save(A__ , A__ )
logger.info(f'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__lowerCamelCase = (
f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Saving model to {output_model_file}' )
torch.save(A__ , A__ )
logger.info(f'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__lowerCamelCase = os.path.join(A__ , f'{MODEL_NAME}_{model_index}' )
os.makedirs(A__ , exist_ok=A__ )
logger.info(f'Saving model to {ckpt_dir}' )
__lowerCamelCase = {"""model""": state_dict}
dist_cp.save_state_dict(
state_dict=A__ , storage_writer=dist_cp.FileSystemWriter(A__ ) , planner=DefaultSavePlanner() , )
logger.info(f'Model saved to {ckpt_dir}' )
def lowerCamelCase__ ( A__ : int , A__ : Dict , A__ : int , A__ : List[str] , A__ : Any=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(A__ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"""Set the `sync_module_states` flag to `True` so that model states are synced across processes when """
"""initializing FSDP object""" )
return
__lowerCamelCase = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Loading model from {input_model_file}' )
__lowerCamelCase = torch.load(A__ )
logger.info(f'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__lowerCamelCase = (
f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Loading model from {input_model_file}' )
__lowerCamelCase = torch.load(A__ )
logger.info(f'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__lowerCamelCase = (
os.path.join(A__ , f'{MODEL_NAME}_{model_index}' )
if f'{MODEL_NAME}' not in input_dir
else input_dir
)
logger.info(f'Loading model from {ckpt_dir}' )
__lowerCamelCase = {"""model""": model.state_dict()}
dist_cp.load_state_dict(
state_dict=A__ , storage_reader=dist_cp.FileSystemReader(A__ ) , planner=DefaultLoadPlanner() , )
__lowerCamelCase = state_dict["""model"""]
logger.info(f'Model loaded from {ckpt_dir}' )
model.load_state_dict(A__ )
def save_fsdp_optimizer( fsdp_plugin : List[str] , accelerator : List[str] , optimizer : str , model : Dict , output_dir : Optional[Any] , optimizer_index : Optional[int]=0 ):
    '''simple docstring'''
    os.makedirs(output_dir , exist_ok=True )
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        optim_state = FSDP.optim_state_dict(model , optimizer )
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
                )
                output_optimizer_file = os.path.join(output_dir , optim_state_name )
                logger.info(f'Saving Optimizer state to {output_optimizer_file}' )
                torch.save(optim_state , output_optimizer_file )
                logger.info(f'Optimizer state saved in {output_optimizer_file}' )
        else:
            ckpt_dir = os.path.join(output_dir , f'{OPTIMIZER_NAME}_{optimizer_index}' )
            os.makedirs(ckpt_dir , exist_ok=True )
            logger.info(f'Saving Optimizer state to {ckpt_dir}' )
            dist_cp.save_state_dict(
                state_dict={"""optimizer""": optim_state} , storage_writer=dist_cp.FileSystemWriter(ckpt_dir ) , planner=DefaultSavePlanner() , )
            logger.info(f'Optimizer state saved in {ckpt_dir}' )
def load_fsdp_optimizer( fsdp_plugin : int , accelerator : List[str] , optimizer : int , model : Any , input_dir : Union[str, Any] , optimizer_index : List[Any]=0 ):
    '''simple docstring'''
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # The check below should work, but it currently doesn't (mostly a PyTorch issue);
            # in the meantime we disable it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
            )
            input_optimizer_file = os.path.join(input_dir , optimizer_name )
            logger.info(f'Loading Optimizer state from {input_optimizer_file}' )
            optim_state = torch.load(input_optimizer_file )
            logger.info(f'Optimizer state loaded from {input_optimizer_file}' )
        else:
            ckpt_dir = (
                os.path.join(input_dir , f'{OPTIMIZER_NAME}_{optimizer_index}' )
                if f'{OPTIMIZER_NAME}' not in input_dir
                else input_dir
            )
            logger.info(f'Loading Optimizer from {ckpt_dir}' )
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict() , optimizer_key="""optimizer""" , storage_reader=dist_cp.FileSystemReader(ckpt_dir ) , )
            optim_state = optim_state["""optimizer"""]
            logger.info(f'Optimizer loaded from {ckpt_dir}' )
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state , model , optimizer )
        optimizer.load_state_dict(flattened_osd )
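# Illustrative round-trip sketch (hypothetical snippet, not part of this module).
# It assumes `accelerator`, `model`, `optimizer` and a configured `fsdp_plugin`
# already exist, and that the truncated save helper above this section is named
# `save_fsdp_model`, mirroring the load/save helpers defined here:
#
#     save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#     save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")
#     accelerator.wait_for_everyone()
#     load_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#     load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")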
| 12 | 0 |
from math import factorial
DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError('Parameter number must be int')
    if number < 0:
        raise ValueError('Parameter number must be greater than or equal to 0')
    # Convert the number to a string to iterate over its digits and sum their factorials.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 100_0000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError('Parameters chain_length and number_limit must be int')
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            'Parameters chain_length and number_limit must be greater than 0')
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item,
        # or the length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If the chain contains the exact amount of elements, increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
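# A quick, hedged sanity check (illustrative only; `_demo_digit_factorial_chains`
# is a hypothetical helper, not part of the original module):
def _demo_digit_factorial_chains() -> None:
    assert digit_factorial_sum(145) == 145  # 1! + 4! + 5! == 145, a fixed point of the map
    assert solution(chain_length=5, number_limit=70) >= 1  # 69 starts a 5-term chain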
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution()}""")
| 50 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Tuple = ShapEImgaImgPipeline
UpperCAmelCase__ : Optional[Any] = ['image']
UpperCAmelCase__ : int = ['image']
UpperCAmelCase__ : Any = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
UpperCAmelCase__ : int = False
@property
def lowerCAmelCase__ ( self: int ):
return 32
@property
def lowerCAmelCase__ ( self: List[str] ):
return 32
@property
def lowerCAmelCase__ ( self: Any ):
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self: Dict ):
return 8
@property
def lowerCAmelCase__ ( self: int ):
torch.manual_seed(0 )
__lowerCamelCase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__lowerCamelCase = CLIPVisionModel(UpperCamelCase_ )
return model
@property
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=UpperCamelCase_ , do_normalize=UpperCamelCase_ , do_resize=UpperCamelCase_ , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_24 , )
return image_processor
@property
def lowerCAmelCase__ ( self: Tuple ):
torch.manual_seed(0 )
__lowerCamelCase = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
__lowerCamelCase = PriorTransformer(**UpperCamelCase_ )
return model
@property
def lowerCAmelCase__ ( self: List[Any] ):
torch.manual_seed(0 )
__lowerCamelCase = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
__lowerCamelCase = ShapERenderer(**UpperCamelCase_ )
return model
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = self.dummy_prior
__lowerCamelCase = self.dummy_image_encoder
__lowerCamelCase = self.dummy_image_processor
__lowerCamelCase = self.dummy_renderer
__lowerCamelCase = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=10_24 , prediction_type="""sample""" , use_karras_sigmas=UpperCamelCase_ , clip_sample=UpperCamelCase_ , clip_sample_range=1.0 , )
__lowerCamelCase = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def lowerCAmelCase__ ( self: int , UpperCamelCase_: List[Any] , UpperCamelCase_: Dict=0 ):
__lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = {
"""image""": input_image,
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = """cpu"""
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**UpperCamelCase_ )
__lowerCamelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) )
__lowerCamelCase = output.images[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowerCamelCase = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self: List[str] ):
        # NOTE: Larger batch sizes cause this test to time out; only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = torch_device == """cpu"""
__lowerCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**UpperCamelCase_ )
__lowerCamelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = 1
__lowerCamelCase = 2
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
for key in inputs.keys():
if key in self.batch_params:
__lowerCamelCase = batch_size * [inputs[key]]
__lowerCamelCase = pipe(**UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_img2img_out.npy""" )
__lowerCamelCase = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
__lowerCamelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
__lowerCamelCase = pipe(
UpperCamelCase_ , generator=UpperCamelCase_ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
| 12 | 0 |
from __future__ import annotations
def median_of_two_arrays(nums1 : list[float] , nums2 : list[float] ) -> float:
    """simple docstring"""
    all_numbers = sorted(nums1 + nums2 )
    div , mod = divmod(len(all_numbers ) , 2 )
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
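# Hedged examples of the expected behavior (illustrative, not executed doctests):
# median_of_two_arrays([1, 2], [3])      -> 2   (odd total count: the middle element)
# median_of_two_arrays([-1, 3], [2, 4])  -> 2.5 (even total count: mean of the two middle elements)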
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 51 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width( height : Optional[int] , width : Dict , scale_factor : Optional[int]=8 ):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
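# Illustrative behavior of the helper above (values computed from its definition):
# downscale_height_and_width(512, 512) -> (64, 64)   # 512 is divisible by 8**2
# downscale_height_and_width(513, 512) -> (72, 64)   # non-divisible heights round up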
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[Any] , UpperCamelCase_: UNetaDConditionModel , UpperCamelCase_: DDPMScheduler , UpperCamelCase_: VQModel , ):
super().__init__()
self.register_modules(
unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , movq=UpperCamelCase_ , )
__lowerCamelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Any , UpperCamelCase_: Tuple , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: int ):
if latents is None:
__lowerCamelCase = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
__lowerCamelCase = latents.to(UpperCamelCase_ )
__lowerCamelCase = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
__lowerCamelCase = torch.device(F'cuda:{gpu_id}' )
__lowerCamelCase = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[int]=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
__lowerCamelCase = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=UpperCamelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__lowerCamelCase = None
for cpu_offloaded_model in [self.unet, self.movq]:
__lowerCamelCase, __lowerCamelCase = cpu_offload_with_hook(UpperCamelCase_ , UpperCamelCase_ , prev_module_hook=UpperCamelCase_ )
# We'll offload the last model manually.
__lowerCamelCase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self: int ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase_ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(UpperCamelCase_ )
def __call__( self: Tuple , UpperCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: int = 5_12 , UpperCamelCase_: int = 5_12 , UpperCamelCase_: int = 1_00 , UpperCamelCase_: float = 4.0 , UpperCamelCase_: int = 1 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ):
__lowerCamelCase = self._execution_device
__lowerCamelCase = guidance_scale > 1.0
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 )
__lowerCamelCase = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
__lowerCamelCase = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
__lowerCamelCase = negative_image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
__lowerCamelCase = hint.repeat_interleave(UpperCamelCase_ , dim=0 )
__lowerCamelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ )
__lowerCamelCase = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ )
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
__lowerCamelCase = self.scheduler.timesteps
__lowerCamelCase = self.movq.config.latent_channels
__lowerCamelCase, __lowerCamelCase = downscale_height_and_width(UpperCamelCase_ , UpperCamelCase_ , self.movq_scale_factor )
# create initial latent
__lowerCamelCase = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
__lowerCamelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCamelCase = {"""image_embeds""": image_embeds, """hint""": hint}
__lowerCamelCase = self.unet(
sample=UpperCamelCase_ , timestep=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , added_cond_kwargs=UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0]
if do_classifier_free_guidance:
__lowerCamelCase, __lowerCamelCase = noise_pred.split(latents.shape[1] , dim=1 )
__lowerCamelCase, __lowerCamelCase = noise_pred.chunk(2 )
__lowerCamelCase, __lowerCamelCase = variance_pred.chunk(2 )
__lowerCamelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__lowerCamelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__lowerCamelCase, __lowerCamelCase = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase = self.scheduler.step(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ , )[0]
# post-processing
__lowerCamelCase = self.movq.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}' )
if output_type in ["np", "pil"]:
__lowerCamelCase = image * 0.5 + 0.5
__lowerCamelCase = image.clamp(0 , 1 )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowerCamelCase = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
| 12 | 0 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class A__ ( unittest.TestCase , __snake_case ):
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = load_tool("text-classification" )
self.tool.setup()
UpperCamelCase : List[str] = load_tool("text-classification" , remote=A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(A_ , "positive" )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(A_ , "positive" )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(A_ , "positive" )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(A_ , "positive" )
| 52 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class lowerCamelCase__( unittest.TestCase):
def __init__( self: Optional[int] , UpperCamelCase_: Any , UpperCamelCase_: int=2 , UpperCamelCase_: Optional[Any]=56 , UpperCamelCase_: Tuple=True , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: str=True , UpperCamelCase_: str=99 , UpperCamelCase_: Tuple=32 , UpperCamelCase_: int=2 , UpperCamelCase_: Optional[int]=2 , UpperCamelCase_: Tuple=7 , UpperCamelCase_: Optional[int]="gelu_new" , UpperCamelCase_: Any=0.1 , UpperCamelCase_: List[str]=0.1 , UpperCamelCase_: List[Any]=5_12 , UpperCamelCase_: Union[str, Any]=16 , UpperCamelCase_: int=2 , UpperCamelCase_: Dict=0.02 , UpperCamelCase_: Tuple=4 , UpperCamelCase_: Union[str, Any]="block_sparse" , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Any=False , UpperCamelCase_: Any=2 , UpperCamelCase_: int=3 , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_attention_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_choices
__lowerCamelCase = rescale_embeddings
__lowerCamelCase = attention_type
__lowerCamelCase = use_bias
__lowerCamelCase = block_size
__lowerCamelCase = num_random_blocks
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_attention_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = config_and_inputs
__lowerCamelCase = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""attention_mask""": attention_mask,
}
return config, inputs_dict
@require_flax
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Optional[int] = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Optional[int] = False
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase__ ( self: Optional[Any] ):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase__ ( self: List[Any] ):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase__ ( self: List[Any] ):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase__ ( self: List[str] ):
super().test_hidden_states_output()
@slow
def lowerCAmelCase__ ( self: Optional[Any] ):
for model_class_name in self.all_model_classes:
__lowerCamelCase = model_class_name.from_pretrained("""google/bigbird-roberta-base""" )
self.assertIsNotNone(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict ):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCamelCase = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = model_class(UpperCamelCase_ )
@jax.jit
def model_jitted(UpperCamelCase_: Tuple , UpperCamelCase_: List[Any]=None , **UpperCamelCase_: Union[str, Any] ):
return model(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , **UpperCamelCase_ )
with self.subTest("""JIT Enabled""" ):
__lowerCamelCase = model_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__lowerCamelCase = model_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: int , UpperCamelCase_: List[str] , UpperCamelCase_: Any , UpperCamelCase_: Dict=1E-5 , UpperCamelCase_: List[str]="outputs" , UpperCamelCase_: List[str]=None ):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in the PyTorch version,
        # an effort was made to return `attention_probs` (yet to be verified).
if name.startswith("""outputs.attentions""" ):
return
else:
super().check_pt_flax_outputs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
| 12 | 0 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
a__ : Tuple =[
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def test_custom_files_are_present( transformers_path : Path ) -> bool:
    """simple docstring"""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True
if __name__ == "__main__":
a__ : Tuple =argparse.ArgumentParser()
parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
a__ : Union[str, Any] =parser.parse_args()
if args.check_lib:
a__ : Optional[int] =importlib.import_module('''transformers''')
a__ : List[str] =Path(transformers_module.__file__).parent
else:
a__ : Tuple =Path.cwd() / '''build/lib/transformers'''
if not test_custom_files_are_present(transformers_path):
raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
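# Illustrative invocations (the script's file name below is an assumption):
# python utils/check_build.py              # check the repository build under build/lib/transformers
# python utils/check_build.py --check_lib  # check the installed `transformers` package instead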
| 53 |
def odd_even_transposition( arr : list ):
    '''simple docstring'''
    arr_size = len(arr )
    for pass_index in range(arr_size ):
        # Alternate between even- and odd-indexed adjacent pairs on each pass.
        for i in range(pass_index % 2 , arr_size - 1 , 2 ):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
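# Design note: odd-even transposition sort is the sequential form of a parallel
# sorting network; within one pass every compared pair is disjoint, so all
# comparisons of a pass could run concurrently on separate processors.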
| 12 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
class Automaton:
    """simple docstring"""
    def __init__( self : str , keywords : list[str] ) -> None:
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []} )
        for keyword in keywords:
            self.add_keyword(keyword )
        self.set_fail_transitions()
    def find_next_state( self : List[Any] , current_state : int , char : str ) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None
    def add_keyword( self : Optional[Any] , keyword : str ) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state , character )
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    } )
                self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
                current_state = len(self.adlist ) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword )
    def set_fail_transitions( self : Union[str, Any] ) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node )
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child )
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state , self.adlist[child]["value"] ) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state , self.adlist[child]["value"] )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )
    def search_in( self : int , string : str ) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string ) ):
            while (
                self.find_next_state(current_state , string[i] ) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state , string[i] )
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key ) + 1 )
        return result
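# Hedged usage sketch (expected output derived from the algorithm above):
# auto = Automaton(["what", "hat", "ver", "er"])
# auto.search_in("whatever, err ... , wherever")
# -> {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}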
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__:
def __init__( self: Any , UpperCamelCase_: str , UpperCamelCase_: Dict ):
__lowerCamelCase = question_encoder
__lowerCamelCase = generator
__lowerCamelCase = self.question_encoder
    def save_pretrained( self: List[str] , save_directory: Optional[Any] ):
        if os.path.isfile(save_directory ):
            raise ValueError(F'Provided path ({save_directory}) should be a directory, not a file' )
        os.makedirs(save_directory , exist_ok=True )
        question_encoder_path = os.path.join(save_directory , """question_encoder_tokenizer""" )
        generator_path = os.path.join(save_directory , """generator_tokenizer""" )
        self.question_encoder.save_pretrained(question_encoder_path )
        self.generator.save_pretrained(generator_path )
@classmethod
def lowerCAmelCase__ ( cls: List[Any] , UpperCamelCase_: Dict , **UpperCamelCase_: Union[str, Any] ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
__lowerCamelCase = kwargs.pop("""config""" , UpperCamelCase_ )
if config is None:
__lowerCamelCase = RagConfig.from_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoTokenizer.from_pretrained(
UpperCamelCase_ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
__lowerCamelCase = AutoTokenizer.from_pretrained(
UpperCamelCase_ , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=UpperCamelCase_ , generator=UpperCamelCase_ )
def __call__( self: Tuple , *UpperCamelCase_: int , **UpperCamelCase_: int ):
return self.current_tokenizer(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple , *UpperCamelCase_: List[Any] , **UpperCamelCase_: List[Any] ):
return self.generator.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , *UpperCamelCase_: str , **UpperCamelCase_: Union[str, Any] ):
return self.generator.decode(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = self.question_encoder
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = self.generator
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[List[str]] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: str = "longest" , UpperCamelCase_: str = None , UpperCamelCase_: bool = True , **UpperCamelCase_: int , ):
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , UpperCamelCase_ , )
if max_length is None:
__lowerCamelCase = self.current_tokenizer.model_max_length
__lowerCamelCase = self(
UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , max_length=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , **UpperCamelCase_ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
__lowerCamelCase = self.current_tokenizer.model_max_length
__lowerCamelCase = self(
text_target=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , padding=UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , **UpperCamelCase_ , )
__lowerCamelCase = labels["""input_ids"""]
return model_inputs
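# Illustrative usage sketch (hypothetical snippet; assumes the upstream class name
# `RagTokenizer` and the public "facebook/rag-token-nq" checkpoint):
# tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
# inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")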
| 12 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class snake_case :
"""simple docstring"""
def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=10 , UpperCamelCase=3 , UpperCamelCase=2 , UpperCamelCase=2 , UpperCamelCase=2 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=32 , UpperCamelCase=5 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=10 , UpperCamelCase=0.02 , UpperCamelCase=0.9 , UpperCamelCase=None , ):
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = patch_size
lowerCamelCase_ = tubelet_size
lowerCamelCase_ = num_frames
lowerCamelCase_ = is_training
lowerCamelCase_ = use_labels
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = mask_ratio
lowerCamelCase_ = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowerCamelCase_ = (image_size // patch_size) ** 2
lowerCamelCase_ = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowerCamelCase_ = int(mask_ratio * self.seq_length )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def snake_case ( self ):
"""simple docstring"""
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = VideoMAEModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCamelCase_ = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = VideoMAEForPreTraining(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCamelCase_ = torch.ones((self.num_masks,) )
lowerCamelCase_ = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowerCamelCase_ = mask.expand(self.batch_size , -1 ).bool()
lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase )
# model only returns predictions for masked patches
lowerCamelCase_ = mask.sum().item()
lowerCamelCase_ = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
_lowerCamelCase = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = VideoMAEModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase=False ):
"""simple docstring"""
lowerCamelCase_ = copy.deepcopy(UpperCamelCase )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCamelCase_ = torch.ones((self.model_tester.num_masks,) )
lowerCamelCase_ = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowerCamelCase_ = mask.expand(self.model_tester.batch_size , -1 ).bool()
lowerCamelCase_ = bool_masked_pos.to(UpperCamelCase )
if return_labels:
if model_class in [
*get_values(UpperCamelCase ),
]:
lowerCamelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase )
return inputs_dict
def snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="VideoMAE does not use inputs_embeds" )
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase , nn.Linear ) )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(UpperCamelCase )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase )
@slow
def snake_case ( self ):
"""simple docstring"""
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = VideoMAEModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
if not self.has_attentions:
pass
else:
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = True
for model_class in self.all_model_classes:
lowerCamelCase_ = self.model_tester.seq_length - self.model_tester.num_masks
lowerCamelCase_ = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = True
lowerCamelCase_ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
lowerCamelCase_ = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase_ = True
lowerCamelCase_ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
lowerCamelCase_ = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowerCamelCase_ = len(UpperCamelCase )
# Check attention is always last and order is fine
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
self.assertEqual(out_len + 1 , len(UpperCamelCase ) )
lowerCamelCase_ = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def snake_case ( self ):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
lowerCamelCase_ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
lowerCamelCase_ = outputs.hidden_states
lowerCamelCase_ = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
lowerCamelCase_ = self.model_tester.seq_length - self.model_tester.num_masks
lowerCamelCase_ = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def snake_case ( self ):
"""simple docstring"""
pass
def __snake_case ( ):
lowerCamelCase_ = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
lowerCamelCase_ = np.load(UpperCAmelCase_ )
return list(UpperCAmelCase_ )
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self ):
"""simple docstring"""
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics" ).to(
UpperCamelCase )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = prepare_video()
lowerCamelCase_ = image_processor(UpperCamelCase , return_tensors="pt" ).to(UpperCamelCase )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**UpperCamelCase )
# verify the logits
lowerCamelCase_ = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
lowerCamelCase_ = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) )
@slow
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" ).to(UpperCamelCase )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = prepare_video()
lowerCamelCase_ = image_processor(UpperCamelCase , return_tensors="pt" ).to(UpperCamelCase )
# add boolean mask, indicating which patches to mask
lowerCamelCase_ = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
lowerCamelCase_ = torch.load(UpperCamelCase )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**UpperCamelCase )
# verify the logits
lowerCamelCase_ = torch.Size([1, 1408, 1536] )
lowerCamelCase_ = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=UpperCamelCase )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowerCamelCase_ = torch.tensor([0.5_142] , device=UpperCamelCase )
self.assertTrue(torch.allclose(outputs.loss , UpperCamelCase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowerCamelCase_ = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" , norm_pix_loss=UpperCamelCase ).to(
UpperCamelCase )
with torch.no_grad():
lowerCamelCase_ = model(**UpperCamelCase )
        lowerCamelCase_ = torch.tensor([0.6_469] , device=UpperCamelCase )  # the nested torch.tensor call was redundant
self.assertTrue(torch.allclose(outputs.loss , UpperCamelCase , atol=1e-4 ) )
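# Hedged sketch of the boolean-mask construction used throughout these tests
# (mirrors `create_and_check_for_pretraining` above; `num_masks`, `seq_length`
# and `batch_size` stand in for the tester attributes):
# mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
# bool_masked_pos = mask.expand(batch_size, -1).bool()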
| 55 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
UpperCAmelCase_ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
UpperCAmelCase_ = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': f"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'emoji': True,
},
}
]
UpperCAmelCase_ = 0
for log in Path().glob('*.log'):
UpperCAmelCase_ = 0
with open(log, 'r') as f:
for line in f:
UpperCAmelCase_ = json.loads(line)
if line.get('nodeid', '') != "":
UpperCAmelCase_ = line['nodeid']
if line.get('duration', None) is not None:
UpperCAmelCase_ = f"""{line["duration"]:.4f}"""
if line.get('outcome', '') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('_')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
UpperCAmelCase_ = []
log.unlink()
UpperCAmelCase_ = ''
UpperCAmelCase_ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
UpperCAmelCase_ = []
UpperCAmelCase_ = {}
for test in failed_tests:
UpperCAmelCase_ = test[0].split('::')
UpperCAmelCase_ = data[0].split('/')[-1]
if data[0] not in filesafailed:
UpperCAmelCase_ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
UpperCAmelCase_ = [test[0] for test in failed_table]
UpperCAmelCase_ = list(set(files))
# Count number of instances in failed_tests
UpperCAmelCase_ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
UpperCAmelCase_ = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_000:
UpperCAmelCase_ = 'Too many failed tests, please see the full report in the Action results.'
UpperCAmelCase_ = len(err) + 10
UpperCAmelCase_ = message[: 3_000 - offset] + f"""\n...\n```\n{err}"""
print(f"""### {message}""")
else:
UpperCAmelCase_ = 'No failed tests! 🤗'
print(f"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
UpperCAmelCase_ = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
UpperCAmelCase_ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
UpperCAmelCase_ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': f"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
UpperCAmelCase_ = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': f"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
UpperCAmelCase_ = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
UpperCAmelCase_ = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
UpperCAmelCase_ = ''
for i, row in enumerate(test_failures):
if row[0] != test_class:
UpperCAmelCase_ = row[0]
else:
UpperCAmelCase_ = ''
UpperCAmelCase_ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
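# Environment summary (from the references above): this script expects JSON-lines
# test logs as *.log files in the working directory, and reads SLACK_API_TOKEN,
# TEST_TYPE, GITHUB_REPOSITORY and GITHUB_RUN_ID when posting results to Slack.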
| 12 | 0 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def remove_ignore_keys_(state_dict ) -> None:
    '''simple docstring'''
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None )
def rename_keys(s_dict ) -> None:
    '''simple docstring'''
    keys = list(s_dict.keys() )
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers" )] = s_dict.pop(key )
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv" )] = s_dict.pop(key )
def make_linear_from_emb(emb ) -> nn.Linear:
    '''simple docstring'''
    vocab_size ,emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_sat_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path) -> None:
    '''Convert a fairseq Speech2Text checkpoint into a Transformers checkpoint.'''
    ckpt = torch.load(checkpoint_path, map_location='cpu')
    args = ckpt['args']
    state_dict = ckpt['model']
    lm_head_weights = state_dict['decoder.output_projection.weight']
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    vocab_size = state_dict['decoder.embed_tokens.weight'].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(',')]
    config = SpeechaTextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function='relu',
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )
    model = SpeechaTextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            f" but all the following weights are missing {missing}" )
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
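
# Hedged usage sketch: reload a checkpoint written by the converter above. The
# dump folder path is hypothetical; in the unmangled transformers API the class
# is named Speech2TextForConditionalGeneration.
def load_converted_model(dump_folder: str = './s2t-hf'):
    return SpeechaTextForConditionalGeneration.from_pretrained(dump_folder)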
| 56 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    '''Utility class for storing learned text embeddings for classifier-free sampling.'''

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()
        self.learnable = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None
        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: TransformeraDModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(self, vqvae: VQModel, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, transformer: TransformeraDModel, scheduler: VQDiffusionScheduler, learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings):
        super().__init__()
        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt', )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [''] * batch_size
                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt', )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = 'pil',
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}')

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
                f' {type(callback_steps)}.' )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}')
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    'Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'
                    f' {self.transformer.num_vector_embeds - 1} (inclusive).' )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == 'pil':
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        '''Set the lowest-probability latent classes to log(0) so that the kept classes
        have cumulative probability `truncation_rate` per latent pixel.'''
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
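
# Minimal usage sketch for the pipeline above (requires network access to fetch
# the public "microsoft/vq-diffusion-ithq" weights; the prompt and filename are
# arbitrary).
if __name__ == "__main__":
    pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
    output = pipeline("teddy bear playing in the pool", num_inference_steps=50, truncation_rate=0.86)
    output.images[0].save("teddy_bear.png")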
| 12 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(self, vocab_size=30_522, max_position_embeddings=512, sinusoidal_pos_embds=False, n_layers=6, n_heads=12, dim=768, hidden_dim=4 * 768, dropout=0.1, attention_dropout=0.1, activation="gelu", initializer_range=0.02, qa_dropout=0.1, seq_classif_dropout=0.2, pad_token_id=0, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
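
# Hedged sketch: attribute_map lets the generic config names resolve to
# DistilBERT's own fields (the values here are arbitrary).
if __name__ == "__main__":
    config = DistilBertConfig(n_layers=4, n_heads=8, dim=512, hidden_dim=4 * 512)
    print(config.num_hidden_layers)  # -> 4, resolved through attribute_map to n_layers
    print(config.hidden_size)  # -> 512, resolved through attribute_map to dim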
| 57 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
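
# Condensed sketch of what test_sequence_builders checks (requires network
# access to fetch the tokenizer; the input string is arbitrary).
if __name__ == "__main__":
    tok = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
    ids = tok.build_inputs_with_special_tokens(tok.encode("hello world", add_special_tokens=False))
    # DistilBERT frames a single sequence as [CLS] tokens [SEP]
    assert ids[0] == tok.cls_token_id and ids[-1] == tok.sep_token_id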
| 12 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """Pair up leftover ``--key value`` arguments into a dict."""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
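
# Hedged sketch of how parse_unknown_args pairs leftover ``--key value`` flags
# (the flag names here are hypothetical):
def _parse_unknown_args_example():
    parsed = parse_unknown_args(["--num_proc", "4", "--cache_dir", "/tmp/ds"])
    assert parsed == {"num_proc": "4", "cache_dir": "/tmp/ds"}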
| 58 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    '''Creates a set of `DataLoader`s for the `glue`/`mrpc` dataset.'''
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    '''Evaluate on the full validation set and return the accuracy.'''
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    '''Train on GLUE/MRPC, checkpointing after every epoch and verifying resumed state.'''
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizer's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f'state_{starting_epoch-1}.json'), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = os.path.join(args.output_dir, f'epoch_{epoch}')
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f'epoch {epoch}:', state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f'state_{epoch}.json'), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", )
    parser.add_argument(
        "--partial_train_epoch", type=int, default=None, help="If passed, the training will stop after this number of epochs.", )
    parser.add_argument(
        "--num_epochs", type=int, default=2, help="Number of train epochs.", )
    args = parser.parse_args()
    config = {"lr": 2E-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
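
# Minimal standalone sketch of the save/load pattern exercised above; the
# checkpoint directory name is hypothetical.
def _checkpoint_roundtrip_example():
    accelerator = Accelerator()
    model = torch.nn.Linear(4, 2)
    optimizer = AdamW(model.parameters(), lr=2E-5)
    model, optimizer = accelerator.prepare(model, optimizer)
    accelerator.save_state("ckpt/epoch_0")  # writes model, optimizer and RNG state
    accelerator.load_state("ckpt/epoch_0")  # restores everything in place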
| 12 | 0 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_12, type_sequence_label_size=2, initializer_range=0.02, num_labels=2, num_choices=4, summary_type="last", use_proj=True, scope=None, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
    def create_and_check_xlm_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_xlm_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask, )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_xlm_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_xlm_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)
    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)
    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)
    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)
    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)
    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)
    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions) )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)
        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions) )
    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states], [True] * len(hidden_states), )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)
        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(iter_hidden_states), )
    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 4_47]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47,
            14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47,
        ]  # the president the president ... (repeated ten times)
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
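
# Hedged inference sketch for the classes exercised above, using the public
# "xlm-mlm-en-2048" checkpoint from the slow test (requires network access).
if __name__ == "__main__":
    from transformers import XLMTokenizer

    tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
    model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
    inputs = tokenizer("the president", return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # (batch_size, seq_len, vocab_size)
    print(logits.shape)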
| 59 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir('fixtures')
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
SAMPLE_CONFIG = get_tests_dir('fixtures/dummy-config.json')
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: int ):
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ ).to_dict()
config_dict.pop("""feature_extractor_type""" )
__lowerCamelCase = WavaVecaFeatureExtractor(**UpperCamelCase_ )
# save in new folder
model_config.save_pretrained(UpperCamelCase_ )
config.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ )
# make sure private variable is not incorrectly saved
__lowerCamelCase = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: int ):
with self.assertRaisesRegex(
UpperCamelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def lowerCAmelCase__ ( self: Tuple ):
with self.assertRaisesRegex(
UpperCamelCase_ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ , revision="""aaaaaa""" )
def lowerCAmelCase__ ( self: Optional[Any] ):
with self.assertRaisesRegex(
UpperCamelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCAmelCase__ ( self: Tuple ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=False )
        feature_extractor = AutoFeatureExtractor.from_pretrained(
            """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=True )
        self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def lowerCAmelCase__ ( self: Any ):
try:
AutoConfig.register("""custom""" , UpperCamelCase_ )
AutoFeatureExtractor.register(UpperCamelCase_ , UpperCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoFeatureExtractor.register(UpperCamelCase_ , UpperCamelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
__lowerCamelCase = CustomFeatureExtractor.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase__ ( self: Dict ):
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : str = True
try:
AutoConfig.register("""custom""" , UpperCamelCase_ )
AutoFeatureExtractor.register(UpperCamelCase_ , UpperCamelCase_ )
# If remote code is not set, the default is to use local
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(UpperCamelCase_ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
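
# Condensed sketch of the register-and-roundtrip pattern exercised above;
# CustomConfig / CustomFeatureExtractor are the local test fixtures imported at
# the top of this file.
if __name__ == "__main__":
    AutoConfig.register("custom", CustomConfig)
    AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
    with tempfile.TemporaryDirectory() as tmp_dir:
        CustomFeatureExtractor().save_pretrained(tmp_dir)
        feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
        print(type(feature_extractor).__name__)  # -> CustomFeatureExtractor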
| 12 | 0 |
"""simple docstring"""
import numpy as np
import datasets
_DESCRIPTION = '''
Compute the Mahalanobis Distance

Mahalanobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
_CITATION = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
_KWARGS_DESCRIPTION = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''X''': datasets.Sequence(datasets.Value('''float''', id='''sequence'''), id='''X'''),
                } ), )
    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError('''Expected `X` to be a 2D vector''')
        if len(reference_distribution.shape) != 2:
            raise ValueError('''Expected `reference_distribution` to be a 2D vector''')
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                '''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''')

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
| 60 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
UpperCAmelCase_ = get_logger(__name__)
class MockDownloadManager:
    dummy_file_name = 'dummy_data'
    datasets_scripts_dir = 'datasets'
    is_local = False
    def __init__(
        self,
        dataset_name: str,
        cache_dir: str,
        version: Union[Version, str],
        config: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("""dummy""", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("""dummy""", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, """dummy_data.zip""")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, """/"""))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, """/""").split("""/""")[:-1])
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url)
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list
    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass
    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")
    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
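# ---------------------------------------------------------------------------
# Minimal sketch (added for illustration, not part of the original module):
# the manager above maps every real URL to a file inside dummy_data.zip by
# keeping only the last path component and percent-encoding it with
# urllib.parse.quote_plus. The URL below is a hypothetical example.
if __name__ == "__main__":
    example_url = "https://example.com/data/train.csv?version=2"
    dummy_name = urllib.parse.quote_plus(Path(example_url).name)
    print(dummy_name)  # 'train.csv%3Fversion%3D2'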
| 12 | 0 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"""The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ ,lowercase__ ,)
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = RobertaConfig
SCREAMING_SNAKE_CASE__ : int = """roberta"""
    def __init__(self, config):
        """simple docstring"""
        super().__init__(config)
        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. """ ,lowercase__ ,)
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = RobertaConfig
SCREAMING_SNAKE_CASE__ : Optional[int] = """roberta"""
def __init__( self , lowercase_ ):
"""simple docstring"""
super().__init__(lowercase_ )
UpperCAmelCase_ : Any = config.num_labels
UpperCAmelCase_ : Optional[Any] = config.num_hidden_layers
UpperCAmelCase_ : int = DeeRobertaModel(lowercase_ )
UpperCAmelCase_ : Tuple = nn.Dropout(config.hidden_dropout_prob )
UpperCAmelCase_ : Dict = nn.Linear(config.hidden_size , self.config.num_labels )
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False, ):
        """simple docstring"""
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, )
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
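# ---------------------------------------------------------------------------
# Minimal sketch (added for illustration): the DeeBERT-style early exit keeps
# a highway classifier per layer and stops as soon as the entropy of that
# classifier's logits falls below a threshold. The `entropy` helper imported
# above computes the entropy of a pre-softmax logit tensor; the threshold
# value below is a hypothetical choice, not taken from the original recipe.
if __name__ == "__main__":
    import torch

    confident_logits = torch.tensor([[4.0, 0.1, 0.1]])
    uncertain_logits = torch.tensor([[1.0, 1.0, 1.0]])
    threshold = 0.3  # hypothetical early-exit threshold
    print(entropy(confident_logits) < threshold)  # tensor([True]): exit early
    print(entropy(uncertain_logits) < threshold)  # tensor([False]): keep going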
| 61 |
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    '''simple docstring'''
    # Get next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)
    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return
    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply that we learned previously. First we check that in the current board
        # (possible_board) there are not other same value because if there is it means
        # that there are a collision in vertical. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify if the results of this two formulas not exist in their variables
        # respectively. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any or these are True it means there is a collision so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col], [*diagonal_right_collisions, row - col], [*diagonal_left_collisions, row + col], boards, n, )


def n_queens_solution(n: int) -> None:
    '''simple docstring'''
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")
    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
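    # Worked example (added for illustration) of the two diagonal formulas in
    # the comments above: queens at (row=1, col=3) and (row=3, col=1) share a
    # 135º diagonal because row + col matches, while (row=0, col=0) and
    # (row=2, col=2) share a 45º diagonal because row - col matches.
    assert 1 + 3 == 3 + 1  # same 135º diagonal -> collision
    assert 0 - 0 == 2 - 2  # same 45º diagonal -> collision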
| 12 | 0 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """simple docstring"""

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    """simple docstring"""

    _supports_gradient_checkpointing = True
@register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types=("DownEncoderBlock2D",), up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 4, norm_num_groups: int = 32, sample_size: int = 32, scaling_factor: float = 0.18215, ) -> None:
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=True, )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, norm_num_groups=norm_num_groups, act_fn=act_fn, )
        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False) -> None:
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True) -> None:
        self.use_tiling = use_tiling

    def disable_tiling(self) -> None:
        self.enable_tiling(False)

    def enable_slicing(self) -> None:
        self.use_slicing = True

    def disable_slicing(self) -> None:
        self.use_slicing = False
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, 'set_processor'):
                processors[f'{name}.processor'] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f'{name}.{sub_name}', child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors
    def set_attn_processor(self, processor) -> None:
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f'A dict of processors was passed, but the number of processors {len(processor)} does not match the'
                f' number of attention layers: {count}. Please make sure to pass {count} processor classes.')

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, 'set_processor'):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f'{name}.processor'))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self) -> None:
        self.set_attn_processor(AttnProcessor())
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)
    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
def _a ( self , A_ , A_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
__UpperCamelCase =[self._decode(A_ ).sample for z_slice in z.split(1 )]
__UpperCamelCase =torch.cat(A_ )
else:
__UpperCamelCase =self._decode(A_ ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=A_ )
    def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)
    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
    def forward(self, sample: torch.FloatTensor, sample_posterior: bool = False, return_dict: bool = True, generator: Optional[torch.Generator] = None, ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
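# ---------------------------------------------------------------------------
# Minimal sketch (added for illustration): blend_h/blend_v hide seams between
# decoded tiles by linearly cross-fading `blend_extent` columns or rows, with
# tensors shaped (batch, channels, height, width) as in the methods above.
if __name__ == "__main__":
    a = torch.zeros(1, 1, 1, 4)  # left tile
    b = torch.ones(1, 1, 1, 4)   # right tile
    blend_extent = 4
    for x in range(blend_extent):
        b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
    print(b)  # ramps 0.00, 0.25, 0.50, 0.75: the left edge of b now matches a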
| 62 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple('CoinsDistribResult', 'moves excess')


def distribute_coins(root: TreeNode | None) -> int:
    '''simple docstring'''
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("""The nodes number should be same as the number of coins""")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
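    # Usage example (added for illustration): a root holding 3 coins with two
    # empty children needs 2 moves, one coin pushed down each edge.
    #
    #     3
    #    / \
    #   0   0
    example_root = TreeNode(3, TreeNode(0), TreeNode(0))
    assert distribute_coins(example_root) == 2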
| 12 | 0 |
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = [int(item.strip()) for item in user_input.split(',')]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input('Enter the number to be found in the list:\n').strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"""Iterative search: {target} found at positions: {result1}""")
        print(f"""Recursive search: {target} found at positions: {result2}""")
    else:
        print('Not found')
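    # Worked example (added for illustration): ite_ternary_search on the
    # sorted list [1, 3, 5, 7, 9, 11] with target 9 starts with left=0,
    # right=6; since right - left = 6 is already below the precision of 10,
    # it falls straight back to lin_search(0, 6, ...), which scans indices
    # 0..5 and returns 4, the position of the 9.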
| 63 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_pad: bool = True, pad_size: int = 8, **kwargs, ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(self, images: ImageInput, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_pad: Optional[bool] = None, pad_size: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs, ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""")
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"""pixel_values""": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
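# ---------------------------------------------------------------------------
# Worked example (added for illustration) of the padding arithmetic in `pad`:
# every image is grown to the next multiple of `pad_size`. For a 17x21 image
# with the default pad_size of 8:
#   pad_height = (17 // 8 + 1) * 8 - 17 = 7  -> padded height 24
#   pad_width  = (21 // 8 + 1) * 8 - 21 = 3  -> padded width 24
# Note the formula adds a full extra block even when a side is already a
# multiple of 8 (e.g. 16 is padded to 24), because the `+ 1` is unconditional.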
| 12 | 0 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""", FutureWarning, )
            feature_extractor = kwargs.pop("""feature_extractor""")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""")
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        '''simple docstring'''
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("""images""", None)
        text = kwargs.pop("""text""", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("""You need to specify either an `images` or `text` input to process.""")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["""labels"""] = encodings["""input_ids"""]
            return inputs
    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        '''simple docstring'''
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your images inputs, or in a separate call.""" )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        '''simple docstring'''
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"""<s_(.*?)>""", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, """""")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"""<sep/>"""):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token):].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def feature_extractor_class(self):
        '''simple docstring'''
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""", FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        '''simple docstring'''
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""", FutureWarning, )
        return self.image_processor
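# ---------------------------------------------------------------------------
# Worked example (added for illustration) of the token2json grammar parsed
# above: given the generated sequence
#     "<s_menu><s_name>latte</s_name><s_price>4.50</s_price></s_menu>"
# token2json returns the nested dictionary
#     {"menu": {"name": "latte", "price": "4.50"}}
# and leaf values separated by "<sep/>" become lists instead.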
| 64 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    '''simple docstring'''
    if len(nums) == 0:
        raise ValueError("""find_max() arg is an empty sequence""")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("""list index out of range""")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
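    # Usage example (added for illustration): the recursion splits
    # [3, 7, 1, 9] into [3, 7] and [1, 9], keeps the larger half-maximum at
    # each merge, and returns 9 for the full index range.
    assert find_max([3, 7, 1, 9], 0, 3) == 9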
| 12 | 0 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_ffmpeg_warned, _librosa_warned, _audioread_warned = False, False, False


@dataclass
class Audio:
    """simple docstring"""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        """simple docstring"""
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        """simple docstring"""
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""")
    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        """simple docstring"""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. ")
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. ")

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """simple docstring"""
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        """simple docstring"""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """simple docstring"""
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary(), )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()], type=pa.string(), )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
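# ---------------------------------------------------------------------------
# Minimal sketch (added for illustration): raw PCM samples are 16-bit signed
# integers, so the "pcm" branch of encode_example rescales them to floats in
# [-1.0, 1.0] by dividing by 32767, exactly as done above.
if __name__ == "__main__":
    pcm_bytes = np.array([0, 16384, -32767], dtype=np.int16).tobytes()
    audio_array = np.frombuffer(pcm_bytes, dtype=np.int16).astype(np.float32) / 32767
    print(audio_array)  # [ 0.        0.5000153 -1.       ] (approximately)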
| 65 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = """pt"""
        self.framework_tf = """tf"""

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)
    def test_framework_provided(self):
        mock_framework = """mock_framework"""
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)
    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf = MagicMock(return_value=False)
        with patch("""transformers.onnx.features.is_tf_available""", mock_tf):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch = MagicMock(return_value=False)
        with patch("""transformers.onnx.features.is_torch_available""", mock_torch):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf = MagicMock(return_value=True)
        mock_torch = MagicMock(return_value=True)
        with patch("""transformers.onnx.features.is_tf_available""", mock_tf), patch(
            """transformers.onnx.features.is_torch_available""", mock_torch):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf = MagicMock(return_value=False)
        mock_torch = MagicMock(return_value=False)
        with patch("""transformers.onnx.features.is_tf_available""", mock_tf), patch(
            """transformers.onnx.features.is_torch_available""", mock_torch):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
| 12 | 0 |
"""simple docstring"""
def solution(n: int = 1000) -> int:
    '''simple docstring'''
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        # a former `elif a % 15 == 0: result -= a` branch was unreachable here,
        # since every multiple of 15 is already caught by `a % 3 == 0` above
        a += 1
    return result
if __name__ == "__main__":
print(F"""{solution() = }""")
| 66 |
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    '''simple docstring'''
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    '''simple docstring'''
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("""RGB""", (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
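    # Quick check (added for illustration): the blinker oscillates with
    # period 2, flipping between a vertical and a horizontal bar.
    assert new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
    assert new_generation(new_generation(BLINKER)) == BLINKER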
| 12 | 0 |
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
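    # Worked example (added for illustration) of Horner's scheme on the tuple
    # above, f(x) = 5x^2 + 9.3x^3 + 7x^4 at x = 10 (up to float rounding):
    #   result = 7.0
    #   result = 7.0 * 10 + 9.3 = 79.3
    #   result = 79.3 * 10 + 5.0 = 798.0
    #   result = 798.0 * 10 + 0.0 = 7980.0
    #   result = 7980.0 * 10 + 0.0 = 79800.0
    assert abs(horner(poly, x) - 79800.0) < 1e-6
    assert abs(evaluate_poly(poly, x) - 79800.0) < 1e-6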
| 67 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D"""), up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D"""), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], latent_channels=4, sample_size=1_28, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=10_00, hidden_act="""gelu""", projection_dim=5_12, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("""RGB""").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("""RGB""").resize((64, 64))
        if str(device).startswith("""mps"""):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": init_image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""")
        mask_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""")
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
            """/yellow_cat_sitting_on_a_park_bench.npy""")
        model_id = """stabilityai/stable-diffusion-2-inpainting"""
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = """Face of a yellow cat, high resolution, sitting on a park bench"""
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="""np""", )
        image = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert np.abs(expected_image - image).max() < 9E-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""")
        mask_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""")
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
            """/yellow_cat_sitting_on_a_park_bench_fp16.npy""")
        model_id = """stabilityai/stable-diffusion-2-inpainting"""
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = """Face of a yellow cat, high resolution, sitting on a park bench"""
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="""np""", )
        image = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert np.abs(expected_image - image).max() < 5E-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""")
        mask_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""")
        model_id = """stabilityai/stable-diffusion-2-inpainting"""
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="""scheduler""")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=pndm, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = """Face of a yellow cat, high resolution, sitting on a park bench"""
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="""np""", )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 12 | 0 |
from PIL import Image
def mean_threshold(image: Image) -> Image:
    '''simple docstring'''
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    image = mean_threshold(Image.open("""path_to_image""").convert("""L"""))
image.save("""output_image_path""")
| 68 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mluke'] = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 12 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
>>> pipe.to("cuda")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save("cat.png")
```
'''
def get_new_h_w( h , w , scale_factor=8 ) -> Optional[Any]:
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
return new_h * scale_factor, new_w * scale_factor
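# e.g. get_new_h_w(768, 768, scale_factor=8) returns (96, 96): each dimension is
# ceil-divided by scale_factor**2 and then multiplied back by scale_factor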
class UpperCamelCase ( lowerCAmelCase__ ):
    def __init__( self, text_encoder, tokenizer, unet, scheduler, movq, ) -> List[str]:
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents( self, shape, dtype, device, generator, latents, scheduler) -> Optional[Any]:
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}')
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
return latents
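    # _encode_prompt tokenizes the prompt for MultilingualCLIP and, when classifier-free
    # guidance is enabled, also builds the matching negative-prompt embeddings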
    def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, ) -> int:
        batch_size = len(prompt) if isinstance(prompt, list) else 1
# get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding='max_length', truncation=True, max_length=77, return_attention_mask=True, add_special_tokens=True, return_tensors='pt', )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f' {self.tokenizer.model_max_length} tokens: {removed_text}')
        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)
        prompt_embeds , text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask)
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)
if do_classifier_free_guidance:
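            # build the unconditional (negative prompt) embeddings; they are concatenated
            # with the conditional embeddings below so both passes share one forward call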
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [''] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !='
                    f' {type(prompt)}.')
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:'
                    f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
                    ' the batch size of `prompt`.')
            else:
                uncond_tokens = negative_prompt
            uncond_input = self.tokenizer(
                uncond_tokens, padding='max_length', max_length=77, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors='pt', )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)
            negative_prompt_embeds , uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask)
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)
            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1)
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])
return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload( self, gpu_id=0) -> List[str]:
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')
        device = torch.device(f'cuda:{gpu_id}')
        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload( self, gpu_id=0) -> int:
        if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')
        device = torch.device(f'cuda:{gpu_id}')
        if self.device.type != "cpu":
            self.to('cpu', silence_dtype_warnings=False)
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _ , hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        if self.safety_checker is not None:
            _ , hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self) -> Tuple:
        if not hasattr(self.unet, '_hf_hook'):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__( self, prompt, image_embeds, negative_image_embeds, negative_prompt = None, height = 512, width = 512, num_inference_steps = 100, guidance_scale = 4.0, num_images_per_prompt = 1, generator = None, latents = None, output_type = "pil", return_dict = True, ) -> Union[str, Any]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}')
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds , text_encoder_hidden_states , _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt)
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height , width = get_new_h_w(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), text_encoder_hidden_states.dtype, device, generator, latents, self.scheduler, )
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=text_encoder_hidden_states, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]
            if do_classifier_free_guidance:
                noise_pred , variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2)
                _ , variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
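            # schedulers that do not learn the variance only consume the mean prediction,
            # so the extra variance channels are split off again before the scheduler step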
if not (
hasattr(self.scheduler.config, 'variance_type')
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
                noise_pred , _ = noise_pred.split(latents.shape[1], dim=1)
# compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, ).prev_sample
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}')
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 69 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)
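# maps onnxruntime tensor element-type strings to numpy dtypes so session outputs can
# be converted back into numpy arrays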
ORT_TO_NP_TYPE = {
    'tensor(bool)': np.bool_,
    'tensor(int8)': np.int8,
    'tensor(uint8)': np.uint8,
    'tensor(int16)': np.int16,
    'tensor(uint16)': np.uint16,
    'tensor(int32)': np.int32,
    'tensor(uint32)': np.uint32,
    'tensor(int64)': np.int64,
    'tensor(uint64)': np.uint64,
    'tensor(float16)': np.float16,
    'tensor(float)': np.float32,
    'tensor(double)': np.float64,
}
class lowerCamelCase__:
    def __init__( self: str , model: List[Any]=None , **kwargs: str ):
        logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
        self.model = model
        self.model_save_dir = kwargs.get("""model_save_dir""" , None )
        self.latest_model_name = kwargs.get("""latest_model_name""" , None )
def __call__( self: Dict , **UpperCamelCase_: Any ):
        inputs = {k: np.array(v ) for k, v in kwargs.items()}
        return self.model.run(None , inputs )
@staticmethod
    def load_model( path: Union[str, Path] , provider: Tuple=None , sess_options: Tuple=None ):
        if provider is None:
            logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
            provider = """CPUExecutionProvider"""
        return ort.InferenceSession(path , providers=[provider] , sess_options=sess_options )
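    # _save_pretrained below copies the active ONNX graph (plus the external-data file
    # used by models over 2GB) from the cached model directory into the target directory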
    def _save_pretrained( self: int , save_directory: Union[str, Path] , file_name: Optional[str] = None , **kwargs: Optional[int] ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name )
        dst_path = Path(save_directory ).joinpath(model_file_name )
try:
            shutil.copyfile(src_path , dst_path )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
        if src_path.exists():
            dst_path = Path(save_directory ).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
            try:
                shutil.copyfile(src_path , dst_path )
except shutil.SameFileError:
pass
    def save_pretrained( self: List[Any] , save_directory: Union[str, os.PathLike] , **kwargs: Optional[Any] , ):
        if os.path.isfile(save_directory ):
            logger.error(F'Provided path ({save_directory}) should be a directory, not a file' )
            return
        os.makedirs(save_directory , exist_ok=True )
        # saving model weights/files
        self._save_pretrained(save_directory , **kwargs )
@classmethod
    def _from_pretrained( cls: str , model_id: Union[str, Path] , use_auth_token: Optional[Union[bool, str, None]] = None , revision: Optional[Union[str, None]] = None , force_download: bool = False , cache_dir: Optional[str] = None , file_name: Optional[str] = None , provider: Optional[str] = None , sess_options: Optional["ort.SessionOptions"] = None , **kwargs: int , ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id ):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id , model_file_name ) , provider=provider , sess_options=sess_options )
            kwargs["""model_save_dir"""] = Path(model_id )
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id , filename=model_file_name , use_auth_token=use_auth_token , revision=revision , cache_dir=cache_dir , force_download=force_download , )
            kwargs["""model_save_dir"""] = Path(model_cache_path ).parent
            kwargs["""latest_model_name"""] = Path(model_cache_path ).name
            model = OnnxRuntimeModel.load_model(model_cache_path , provider=provider , sess_options=sess_options )
        return cls(model=model , **kwargs )
@classmethod
    def from_pretrained( cls: Optional[int] , model_id: Union[str, Path] , force_download: bool = True , use_auth_token: Optional[str] = None , cache_dir: Optional[str] = None , **model_kwargs: int , ):
        revision = None
        if len(str(model_id ).split("""@""" ) ) == 2:
            model_id, revision = model_id.split("""@""" )
        return cls._from_pretrained(
            model_id=model_id , revision=revision , cache_dir=cache_dir , force_download=force_download , use_auth_token=use_auth_token , **model_kwargs , )
| 12 | 0 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def UpperCamelCase__ ( lowerCAmelCase ):
"""simple docstring"""
return EnvironmentCommand()
def UpperCamelCase__ ( lowerCAmelCase ):
"""simple docstring"""
return EnvironmentCommand(args.accelerate_config_file )
class UpperCAmelCase ( snake_case_ ):
@staticmethod
def lowercase__ ( __snake_case : ArgumentParser ) -> Optional[Any]:
        download_parser = parser.add_parser("""env""" )
download_parser.set_defaults(func=__snake_case )
download_parser.add_argument(
"""--accelerate-config_file""" , default=__snake_case , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=__snake_case )
    def __init__( self : Dict , accelerate_config_file : Optional[Any] , *args : Dict ) -> None:
        self._accelerate_config_file = accelerate_config_file
def lowercase__ ( self : Optional[int] ) -> Dict:
_lowerCAmelCase = """not installed"""
if is_safetensors_available():
import safetensors
_lowerCAmelCase = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
_lowerCAmelCase = f"{safetensors.__version__} but is ignored because of PyTorch version too old."
_lowerCAmelCase = """not installed"""
_lowerCAmelCase = _lowerCAmelCase = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
_lowerCAmelCase = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(__snake_case ):
_lowerCAmelCase = load_config_from_file(self._accelerate_config_file ).to_dict()
_lowerCAmelCase = (
"""\n""".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()] )
if isinstance(__snake_case , __snake_case )
else f"\t{accelerate_config}"
)
_lowerCAmelCase = """not installed"""
_lowerCAmelCase = """NA"""
if is_torch_available():
import torch
_lowerCAmelCase = torch.__version__
_lowerCAmelCase = torch.cuda.is_available()
_lowerCAmelCase = """not installed"""
_lowerCAmelCase = """NA"""
if is_tf_available():
import tensorflow as tf
_lowerCAmelCase = tf.__version__
try:
# deprecated in v2.1
_lowerCAmelCase = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
_lowerCAmelCase = bool(tf.config.list_physical_devices("""GPU""" ) )
_lowerCAmelCase = """not installed"""
_lowerCAmelCase = """not installed"""
_lowerCAmelCase = """not installed"""
_lowerCAmelCase = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
_lowerCAmelCase = flax.__version__
_lowerCAmelCase = jax.__version__
_lowerCAmelCase = jaxlib.__version__
_lowerCAmelCase = jax.lib.xla_bridge.get_backend().platform
        info = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": f"{safetensors_version}",
"""Accelerate version""": f"{accelerate_version}",
"""Accelerate config""": f"{accelerate_config_str}",
"""PyTorch version (GPU?)""": f"{pt_version} ({pt_cuda_available})",
"""Tensorflow version (GPU?)""": f"{tf_version} ({tf_cuda_available})",
"""Flax version (CPU?/GPU?/TPU?)""": f"{flax_version} ({jax_backend})",
"""Jax version""": f"{jax_version}",
"""JaxLib version""": f"{jaxlib_version}",
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
        print(self.format_dict(info ) )
return info
@staticmethod
    def format_dict( d : str ) -> Dict:
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()] ) + "\n"
| 70 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
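# framework-specific model classes are only registered when their backend is importable,
# which keeps the top-level import cheap on machines without torch/tf/flax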
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vision_encoder_decoder'] = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vision_encoder_decoder'] = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vision_encoder_decoder'] = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 12 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A_ :List[Any] = logging.get_logger(__name__)
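# replace_key_with_offset shifts a checkpoint key's block index down by the number of
# patch-embedding layers encountered so far and swaps the original layer name for the new one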
def replace_key_with_offset( key ,offset ,original_name ,new_name ) -> Any:
    to_find = original_name.split('.' )[0]
    key_list = key.split('.' )
    orig_block_num = int(key_list[key_list.index(to_find ) - 2] )
    layer_num = int(key_list[key_list.index(to_find ) - 1] )
    new_block_num = orig_block_num - offset
    key = key.replace(F'{orig_block_num}.{layer_num}.{original_name}' ,F'block.{new_block_num}.{layer_num}.{new_name}' )
    return key
def rename_keys( state_dict ) -> Optional[int]:
    new_state_dict = OrderedDict()
    total_embed_found , patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('network' ):
            key = key.replace('network' ,'poolformer.encoder' )
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('bias' ) and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('proj' )]
            key = key.replace(to_replace ,F'patch_embeddings.{total_embed_found}.' )
            key = key.replace('proj' ,'projection' )
            if key.endswith('bias' ):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = 'poolformer.encoder.' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key ,patch_emb_offset ,'mlp.fc1' ,'output.conv1' )
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key ,patch_emb_offset ,'mlp.fc2' ,'output.conv2' )
        if "norm1" in key:
            key = replace_key_with_offset(key ,patch_emb_offset ,'norm1' ,'before_norm' )
        if "norm2" in key:
            key = replace_key_with_offset(key ,patch_emb_offset ,'norm2' ,'after_norm' )
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key ,patch_emb_offset ,'layer_scale_1' ,'layer_scale_1' )
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key ,patch_emb_offset ,'layer_scale_2' ,'layer_scale_2' )
        if "head" in key:
            key = key.replace('head' ,'classifier' )
        new_state_dict[key] = value
    return new_state_dict
def prepare_img() -> int:
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url ,stream=True ).raw )
    return image
@torch.no_grad()
def convert_poolformer_checkpoint( model_name ,checkpoint_path ,pytorch_dump_folder_path ) -> List[str]:
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = 'huggingface/label-files'
    size = model_name[-3:]
    config.num_labels = 1_000
    filename = 'imagenet-1k-id2label.json'
    expected_shape = (1, 1_000)
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id ,filename ,repo_type='dataset' ) ,'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
if size == "s12":
__UpperCamelCase : Tuple =[2, 2, 6, 2]
__UpperCamelCase : List[Any] =[64, 128, 320, 512]
__UpperCamelCase : Any =4.0
__UpperCamelCase : Tuple =0.9
elif size == "s24":
__UpperCamelCase : str =[4, 4, 12, 4]
__UpperCamelCase : Optional[Any] =[64, 128, 320, 512]
__UpperCamelCase : int =4.0
__UpperCamelCase : Union[str, Any] =0.9
elif size == "s36":
__UpperCamelCase : int =[6, 6, 18, 6]
__UpperCamelCase : int =[64, 128, 320, 512]
__UpperCamelCase : List[Any] =4.0
__UpperCamelCase : Any =1e-6
__UpperCamelCase : str =0.9
elif size == "m36":
__UpperCamelCase : Optional[Any] =[6, 6, 18, 6]
__UpperCamelCase : Union[str, Any] =[96, 192, 384, 768]
__UpperCamelCase : int =4.0
__UpperCamelCase : Tuple =1e-6
__UpperCamelCase : Dict =0.95
elif size == "m48":
__UpperCamelCase : Union[str, Any] =[8, 8, 24, 8]
__UpperCamelCase : List[Any] =[96, 192, 384, 768]
__UpperCamelCase : Optional[Any] =4.0
__UpperCamelCase : Optional[Any] =1e-6
__UpperCamelCase : Dict =0.95
else:
raise ValueError(F'Size {size} not supported' )
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image ,return_tensors='pt' ).pixel_values
    logger.info(F'Converting model {model_name}...' )
    # load original state dict
    state_dict = torch.load(checkpoint_path ,map_location=torch.device('cpu' ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    pixel_values = image_processor(images=prepare_img() ,return_tensors='pt' ).pixel_values
    # forward pass
    outputs = model(pixel_values )
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
    elif size == "s24":
        expected_slice = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
    elif size == "s36":
        expected_slice = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
    elif size == "m36":
        expected_slice = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
    elif size == "m48":
        expected_slice = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
    else:
        raise ValueError(F'Size {size} not supported' )
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3] ,expected_slice ,atol=1e-2 )
    # finally, save model and image processor
    logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 71 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
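# Every helper below branches on fsdp_plugin.state_dict_type: FULL_STATE_DICT gathers
# the whole model on rank 0, LOCAL_STATE_DICT writes one shard per rank, and
# SHARDED_STATE_DICT uses the torch.distributed.checkpoint directory format.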
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : str , A__ : Any , A__ : Dict , A__ : Any=0 ):
'''simple docstring'''
os.makedirs(A__ , exist_ok=A__ )
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        state_dict = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__lowerCamelCase = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
__lowerCamelCase = os.path.join(A__ , A__ )
if accelerator.process_index == 0:
logger.info(f'Saving model to {output_model_file}' )
torch.save(A__ , A__ )
logger.info(f'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__lowerCamelCase = (
f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Saving model to {output_model_file}' )
torch.save(A__ , A__ )
logger.info(f'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__lowerCamelCase = os.path.join(A__ , f'{MODEL_NAME}_{model_index}' )
os.makedirs(A__ , exist_ok=A__ )
logger.info(f'Saving model to {ckpt_dir}' )
__lowerCamelCase = {"""model""": state_dict}
dist_cp.save_state_dict(
state_dict=A__ , storage_writer=dist_cp.FileSystemWriter(A__ ) , planner=DefaultSavePlanner() , )
logger.info(f'Model saved to {ckpt_dir}' )
def lowerCamelCase__ ( A__ : int , A__ : Dict , A__ : int , A__ : List[str] , A__ : Any=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(A__ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"""Set the `sync_module_states` flag to `True` so that model states are synced across processes when """
"""initializing FSDP object""" )
return
__lowerCamelCase = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Loading model from {input_model_file}' )
__lowerCamelCase = torch.load(A__ )
logger.info(f'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__lowerCamelCase = (
f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Loading model from {input_model_file}' )
__lowerCamelCase = torch.load(A__ )
logger.info(f'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__lowerCamelCase = (
os.path.join(A__ , f'{MODEL_NAME}_{model_index}' )
if f'{MODEL_NAME}' not in input_dir
else input_dir
)
logger.info(f'Loading model from {ckpt_dir}' )
__lowerCamelCase = {"""model""": model.state_dict()}
dist_cp.load_state_dict(
state_dict=A__ , storage_reader=dist_cp.FileSystemReader(A__ ) , planner=DefaultLoadPlanner() , )
__lowerCamelCase = state_dict["""model"""]
logger.info(f'Model loaded from {ckpt_dir}' )
model.load_state_dict(A__ )
def lowerCamelCase__ ( A__ : List[str] , A__ : List[str] , A__ : str , A__ : Dict , A__ : Optional[Any] , A__ : Optional[int]=0 ):
'''simple docstring'''
os.makedirs(A__ , exist_ok=A__ )
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__lowerCamelCase = FSDP.optim_state_dict(A__ , A__ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__lowerCamelCase = (
f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Saving Optimizer state to {output_optimizer_file}' )
torch.save(A__ , A__ )
logger.info(f'Optimizer state saved in {output_optimizer_file}' )
else:
__lowerCamelCase = os.path.join(A__ , f'{OPTIMIZER_NAME}_{optimizer_index}' )
os.makedirs(A__ , exist_ok=A__ )
logger.info(f'Saving Optimizer state to {ckpt_dir}' )
dist_cp.save_state_dict(
state_dict={"""optimizer""": optim_state} , storage_writer=dist_cp.FileSystemWriter(A__ ) , planner=DefaultSavePlanner() , )
logger.info(f'Optimizer state saved in {ckpt_dir}' )
def lowerCamelCase__ ( A__ : int , A__ : List[str] , A__ : int , A__ : Any , A__ : Union[str, Any] , A__ : List[Any]=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__lowerCamelCase = None
# below check should work but currently it isn't working (mostly opytorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__lowerCamelCase = (
f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Loading Optimizer state from {input_optimizer_file}' )
__lowerCamelCase = torch.load(A__ )
logger.info(f'Optimizer state loaded from {input_optimizer_file}' )
else:
__lowerCamelCase = (
os.path.join(A__ , f'{OPTIMIZER_NAME}_{optimizer_index}' )
if f'{OPTIMIZER_NAME}' not in input_dir
else input_dir
)
logger.info(f'Loading Optimizer from {ckpt_dir}' )
__lowerCamelCase = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="""optimizer""" , storage_reader=dist_cp.FileSystemReader(A__ ) , )
__lowerCamelCase = optim_state["""optimizer"""]
logger.info(f'Optimizer loaded from {ckpt_dir}' )
__lowerCamelCase = FSDP.optim_state_dict_to_load(A__ , A__ , A__ )
optimizer.load_state_dict(A__ )
| 12 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1000,
'''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.0_0_2,
'''sigma_max''': 8_0.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.0_0_2,
'''sigma_max''': 8_0.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 151,
'''sigma_min''': 0.0_0_2,
'''sigma_max''': 8_0.0,
}
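# all three scheduler configs share the same sigma range (0.002 to 80.0) and differ
# only in how many discretized timesteps they use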
def strabool( v: Optional[Any] ):
    '''simple docstring'''
    if isinstance(v, bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('''boolean value expected''' )
def convert_resnet( checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False ):
    '''simple docstring'''
    # destination keys follow the diffusers ResnetBlock2D naming
    new_checkpoint[F'''{new_prefix}.norm1.weight'''] = checkpoint[F'''{old_prefix}.in_layers.0.weight''']
    new_checkpoint[F'''{new_prefix}.norm1.bias'''] = checkpoint[F'''{old_prefix}.in_layers.0.bias''']
    new_checkpoint[F'''{new_prefix}.conv1.weight'''] = checkpoint[F'''{old_prefix}.in_layers.2.weight''']
    new_checkpoint[F'''{new_prefix}.conv1.bias'''] = checkpoint[F'''{old_prefix}.in_layers.2.bias''']
    new_checkpoint[F'''{new_prefix}.time_emb_proj.weight'''] = checkpoint[F'''{old_prefix}.emb_layers.1.weight''']
    new_checkpoint[F'''{new_prefix}.time_emb_proj.bias'''] = checkpoint[F'''{old_prefix}.emb_layers.1.bias''']
    new_checkpoint[F'''{new_prefix}.norm2.weight'''] = checkpoint[F'''{old_prefix}.out_layers.0.weight''']
    new_checkpoint[F'''{new_prefix}.norm2.bias'''] = checkpoint[F'''{old_prefix}.out_layers.0.bias''']
    new_checkpoint[F'''{new_prefix}.conv2.weight'''] = checkpoint[F'''{old_prefix}.out_layers.3.weight''']
    new_checkpoint[F'''{new_prefix}.conv2.bias'''] = checkpoint[F'''{old_prefix}.out_layers.3.bias''']
    if has_skip:
        new_checkpoint[F'''{new_prefix}.conv_shortcut.weight'''] = checkpoint[F'''{old_prefix}.skip_connection.weight''']
        new_checkpoint[F'''{new_prefix}.conv_shortcut.bias'''] = checkpoint[F'''{old_prefix}.skip_connection.bias''']
    return new_checkpoint
def convert_attention( checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None ):
    '''simple docstring'''
    weight_q , weight_k , weight_v = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3, dim=0 )
    bias_q , bias_k , bias_v = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3, dim=0 )
    new_checkpoint[F'''{new_prefix}.group_norm.weight'''] = checkpoint[F'''{old_prefix}.norm.weight''']
    new_checkpoint[F'''{new_prefix}.group_norm.bias'''] = checkpoint[F'''{old_prefix}.norm.bias''']
    new_checkpoint[F'''{new_prefix}.to_q.weight'''] = weight_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_q.bias'''] = bias_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_k.weight'''] = weight_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_k.bias'''] = bias_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_v.weight'''] = weight_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_v.bias'''] = bias_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_out.0.weight'''] = (
        checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
    )
    new_checkpoint[F'''{new_prefix}.to_out.0.bias'''] = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
    return new_checkpoint
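# con_pt_to_diffuser walks the original checkpoint block by block (down, mid, up) and
# re-keys every tensor into the diffusers UNet2DModel layout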
def con_pt_to_diffuser( checkpoint_path: str, unet_config: Tuple ):
    '''simple docstring'''
    checkpoint = torch.load(checkpoint_path, map_location='''cpu''' )
    new_checkpoint = {}
_lowerCamelCase : List[str] = checkpoint['''time_embed.0.weight''']
_lowerCamelCase : Union[str, Any] = checkpoint['''time_embed.0.bias''']
_lowerCamelCase : List[str] = checkpoint['''time_embed.2.weight''']
_lowerCamelCase : Union[str, Any] = checkpoint['''time_embed.2.bias''']
if unet_config["num_class_embeds"] is not None:
_lowerCamelCase : List[str] = checkpoint['''label_emb.weight''']
_lowerCamelCase : Any = checkpoint['''input_blocks.0.0.weight''']
_lowerCamelCase : Union[str, Any] = checkpoint['''input_blocks.0.0.bias''']
    down_block_types = unet_config['''down_block_types''']
    layers_per_block = unet_config['''layers_per_block''']
    attention_head_dim = unet_config['''attention_head_dim''']
    channels_list = unet_config['''block_out_channels''']
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types ):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block ):
_lowerCamelCase : Union[str, Any] = F'''down_blocks.{i}.resnets.{j}'''
_lowerCamelCase : Tuple = F'''input_blocks.{current_layer}.0'''
_lowerCamelCase : List[Any] = True if j == 0 and downsample_block_has_skip else False
_lowerCamelCase : Tuple = convert_resnet(A_, A_, A_, A_, has_skip=A_ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block ):
_lowerCamelCase : List[Any] = F'''down_blocks.{i}.resnets.{j}'''
_lowerCamelCase : List[Any] = F'''input_blocks.{current_layer}.0'''
_lowerCamelCase : Optional[int] = True if j == 0 and downsample_block_has_skip else False
_lowerCamelCase : Dict = convert_resnet(A_, A_, A_, A_, has_skip=A_ )
_lowerCamelCase : str = F'''down_blocks.{i}.attentions.{j}'''
_lowerCamelCase : Any = F'''input_blocks.{current_layer}.1'''
_lowerCamelCase : str = convert_attention(
A_, A_, A_, A_, A_ )
current_layer += 1
            if i != len(down_block_types ) - 1:
_lowerCamelCase : List[Any] = F'''down_blocks.{i}.downsamplers.0'''
_lowerCamelCase : List[str] = F'''input_blocks.{current_layer}.0'''
_lowerCamelCase : str = convert_resnet(A_, A_, A_, A_ )
current_layer += 1
        prev_channels = current_channels
# hardcoded the mid-block for now
_lowerCamelCase : Tuple = '''mid_block.resnets.0'''
_lowerCamelCase : int = '''middle_block.0'''
_lowerCamelCase : List[str] = convert_resnet(A_, A_, A_, A_ )
_lowerCamelCase : List[str] = '''mid_block.attentions.0'''
_lowerCamelCase : Any = '''middle_block.1'''
_lowerCamelCase : List[Any] = convert_attention(A_, A_, A_, A_, A_ )
_lowerCamelCase : Tuple = '''mid_block.resnets.1'''
_lowerCamelCase : Tuple = '''middle_block.2'''
_lowerCamelCase : Dict = convert_resnet(A_, A_, A_, A_ )
    current_layer = 0
    up_block_types = unet_config['''up_block_types''']
    for i, layer_type in enumerate(up_block_types ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_lowerCamelCase : List[str] = F'''up_blocks.{i}.resnets.{j}'''
_lowerCamelCase : Dict = F'''output_blocks.{current_layer}.0'''
_lowerCamelCase : Union[str, Any] = convert_resnet(A_, A_, A_, A_, has_skip=A_ )
current_layer += 1
            if i != len(up_block_types ) - 1:
_lowerCamelCase : Tuple = F'''up_blocks.{i}.upsamplers.0'''
_lowerCamelCase : Dict = F'''output_blocks.{current_layer-1}.1'''
_lowerCamelCase : Tuple = convert_resnet(A_, A_, A_, A_ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_lowerCamelCase : Any = F'''up_blocks.{i}.resnets.{j}'''
_lowerCamelCase : Optional[Any] = F'''output_blocks.{current_layer}.0'''
_lowerCamelCase : Tuple = convert_resnet(A_, A_, A_, A_, has_skip=A_ )
_lowerCamelCase : List[str] = F'''up_blocks.{i}.attentions.{j}'''
_lowerCamelCase : Optional[int] = F'''output_blocks.{current_layer}.1'''
_lowerCamelCase : int = convert_attention(
A_, A_, A_, A_, A_ )
current_layer += 1
            if i != len(up_block_types ) - 1:
_lowerCamelCase : Optional[int] = F'''up_blocks.{i}.upsamplers.0'''
_lowerCamelCase : Tuple = F'''output_blocks.{current_layer-1}.2'''
_lowerCamelCase : Any = convert_resnet(A_, A_, A_, A_ )
_lowerCamelCase : Optional[int] = checkpoint['''out.0.weight''']
_lowerCamelCase : int = checkpoint['''out.0.bias''']
_lowerCamelCase : Union[str, Any] = checkpoint['''out.2.weight''']
_lowerCamelCase : Optional[int] = checkpoint['''out.2.bias''']
return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
    parser.add_argument(
        '''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
    )
    parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
    print(F"""Checkpoint: {ckpt_name}""")
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
    if not args.class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNetaDModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
| 72 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Tuple = ShapEImgaImgPipeline
UpperCAmelCase__ : Optional[Any] = ['image']
UpperCAmelCase__ : int = ['image']
UpperCAmelCase__ : Any = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
UpperCAmelCase__ : int = False
@property
def lowerCAmelCase__ ( self: int ):
return 32
@property
def lowerCAmelCase__ ( self: List[str] ):
return 32
@property
def lowerCAmelCase__ ( self: Any ):
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self: Dict ):
return 8
@property
def lowerCAmelCase__ ( self: int ):
torch.manual_seed(0 )
        config = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
        model = CLIPVisionModel(config )
return model
@property
def lowerCAmelCase__ ( self: Dict ):
        image_processor = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=UpperCamelCase_ , do_normalize=UpperCamelCase_ , do_resize=UpperCamelCase_ , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_24 , )
return image_processor
@property
def lowerCAmelCase__ ( self: Tuple ):
torch.manual_seed(0 )
        model_kwargs = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
        model = PriorTransformer(**model_kwargs )
return model
@property
def lowerCAmelCase__ ( self: List[Any] ):
torch.manual_seed(0 )
        model_kwargs = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs )
return model
def lowerCAmelCase__ ( self: List[str] ):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="""exp""" , num_train_timesteps=10_24 , prediction_type="""sample""" , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
    def get_dummy_inputs( self: int , device: List[Any] , seed: Dict=0 ):
        input_image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"""image""": input_image,
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = """cpu"""
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**UpperCamelCase_ )
__lowerCamelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) )
__lowerCamelCase = output.images[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowerCamelCase = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self: List[str] ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = torch_device == """cpu"""
__lowerCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**UpperCamelCase_ )
__lowerCamelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = 1
__lowerCamelCase = 2
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
for key in inputs.keys():
if key in self.batch_params:
__lowerCamelCase = batch_size * [inputs[key]]
__lowerCamelCase = pipe(**UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: Any ):
        input_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/shap_e/test_shap_e_img2img_out.npy""" )
        pipe = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            input_image , generator=generator , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_image )
| 12 | 0 |
def upper( word: str ) -> str:
    # shift lowercase ASCII letters into uppercase by subtracting 32 from the code point
    return "".join(chr(ord(char ) - 32 ) if 'a' <= char <= 'z' else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 73 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
    Examples:
        ```py
        >>> import torch
        >>> import numpy as np

        >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
        >>> from transformers import pipeline
        >>> from diffusers.utils import load_image


        >>> def make_hint(image, depth_estimator):
        ...     image = depth_estimator(image)["depth"]
        ...     image = np.array(image)
        ...     image = image[:, :, None]
        ...     image = np.concatenate([image, image, image], axis=2)
        ...     detected_map = torch.from_numpy(image).float() / 255.0
        ...     hint = detected_map.permute(2, 0, 1)
        ...     return hint


        >>> depth_estimator = pipeline("depth-estimation")

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior = pipe_prior.to("cuda")

        >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        ... )
        >>> pipe = pipe.to("cuda")


        >>> img = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/cat.png"
        ... ).resize((768, 768))

        >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")

        >>> prompt = "A robot, 4k photo"
        >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"

        >>> generator = torch.Generator(device="cuda").manual_seed(43)

        >>> image_emb, zero_image_emb = pipe_prior(
        ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
        ... ).to_tuple()

        >>> images = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     hint=hint,
        ...     num_inference_steps=50,
        ...     generator=generator,
        ...     height=768,
        ...     width=768,
        ... ).images

        >>> images[0].save("robot_cat.png")
        ```
'''
def downscale_height_and_width( height: int , width: int , scale_factor: int = 8 ):
    '''Round `height`/`width` up to the nearest multiple of `scale_factor**2`, returned in units of `scale_factor`.'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
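# Illustration (not part of the original file): with the default scale_factor=8,
# downscale_height_and_width(300, 300) == (40, 40), since 300 // 64 == 4, the
# nonzero remainder adds 1, and 5 * 8 == 40.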
class lowerCamelCase__( DiffusionPipeline ):
    def __init__( self , unet: UNetaDConditionModel , scheduler: DDPMScheduler , movq: VQModel , ):
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self , gpu_id: int = 0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""" )
        device = torch.device(F'cuda:{gpu_id}' )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload( self , gpu_id: int = 0 ):
        if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
        device = torch.device(F'cuda:{gpu_id}' )
        if self.device.type != "cpu":
            self.to("""cpu""" , silence_dtype_warnings=True )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
        if not hasattr(self.unet , """_hf_hook""" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , """_hf_hook""" )
                and hasattr(module._hf_hook , """execution_device""" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    @replace_example_docstring(UpperCAmelCase_ )
    def __call__( self , image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] , negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] , hint: torch.FloatTensor , height: int = 512 , width: int = 512 , num_inference_steps: int = 100 , guidance_scale: float = 4.0 , num_images_per_prompt: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if isinstance(hint , list ):
            hint = torch.cat(hint , dim=0 )
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            hint = hint.repeat_interleave(num_images_per_prompt , dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
            hint = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=device )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height , width , self.movq_scale_factor )
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        for i, t in enumerate(self.progress_bar(timesteps ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"""image_embeds""": image_embeds, """hint""": hint}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2 )
                _, variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , """variance_type""" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )["""sample"""]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 12 | 0 |
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum( number : int ):
    if not isinstance(number , int ):
        raise TypeError('Parameter number must be int' )
    if number < 0:
        raise ValueError('Parameter number must be greater than or equal to 0' )
    # Converts number to a string to iterate over its digits, and sums their factorials.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number ) )
def solution( chain_length : int = 60 , number_limit : int = 100_0000 ):
    if not isinstance(chain_length , int ) or not isinstance(number_limit , int ):
        raise TypeError('Parameters chain_length and number_limit must be int' )
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            'Parameters chain_length and number_limit must be greater than 0' )
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}
    for start_chain_element in range(1 , number_limit ):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element )
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element )
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
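# Worked example (the classic Project Euler 74 chain; not part of the original
# file): starting from 69,
#   69 -> 6! + 9! = 363600 -> 1454 -> 169 -> 363601 -> 1454 (repeats)
# so the non-repeating chain 69, 363600, 1454, 169, 363601 has length 5, and
# solution() counts the starting points below `number_limit` whose chain has
# exactly `chain_length` terms.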
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution()}""") | 74 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=2 , seq_length=56 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=2 , intermediate_size=7 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , attention_type="block_sparse" , use_bias=True , rescale_embeddings=False , block_size=2 , num_random_blocks=3 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """attention_mask""": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    test_attn_probs = False
    test_mismatched_shapes = False
    def setUp( self ):
        self.model_tester = FlaxBigBirdModelTester(self )
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained( self ):
        super().test_from_pretrained_save_pretrained()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init( self ):
        super().test_from_pretrained_with_no_automatic_init()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init( self ):
        super().test_no_automatic_init()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output( self ):
        super().test_hidden_states_output()
    @slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""google/bigbird-roberta-base""" )
            self.assertIsNotNone(model )
    def test_attention_outputs( self ):
        if self.test_attn_probs:
            super().test_attention_outputs()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def model_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model(input_ids=input_ids , attention_mask=attention_mask , **kwargs )
                with self.subTest("""JIT Enabled""" ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def check_pt_flax_outputs( self , fx_outputs , pt_outputs , model_class , tol=1E-5 , name="outputs" , attributes=None ):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("""outputs.attentions""" ):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs , pt_outputs , model_class , tol , name , attributes )
| 12 | 0 |
'''simple docstring'''
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
a_ : Tuple = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
a_ : Dict = direct_transformers_import(PATH_TO_TRANSFORMERS)
a_ : Any = transformers.models.auto.configuration_auto.CONFIG_MAPPING
a_ : Tuple = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def check_attribute_being_used( config_class , attributes , default_value , source_strings ):
    """Check whether any name in `attributes` is used in any of the modeling source strings."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                F'''config.{attribute}''' in modeling_source
                or F'''getattr(config, "{attribute}"''' in modeling_source
                or F'''getattr(self.config, "{attribute}"''' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rF'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' , modeling_source , )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break
    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        '''bos_index''',
        '''eos_index''',
        '''pad_index''',
        '''unk_index''',
        '''mask_index''',
        '''image_size''',
        '''use_cache''',
        '''out_features''',
        '''out_indices''',
    ]
    attributes_used_in_generation = ['''encoder_no_repeat_ngram_size''']
    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith('''_token_id''' ):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
                case_allowed = allowed_cases is True or attribute in allowed_cases
    return attribute_used or case_allowed
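# Example of what the multi-line regex above is meant to catch (illustrative,
# not from the repo): a `getattr` call split across lines, e.g.
#     getattr(
#         self.config, "hidden_size", None
#     )
# where arbitrary whitespace separates the tokens the substring checks expect.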
def check_config_attributes_being_used( config_class ):
    """Return the sorted list of `config_class.__init__` arguments never used in the modeling files."""
    signature = dict(inspect.signature(config_class.__init__ ).parameters )
    parameter_names = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
    parameter_defaults = [signature[param].default for param in parameter_names]
    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map ) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class )
    model_dir = os.path.dirname(config_source_file )
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir , fn ) for fn in os.listdir(model_dir ) if fn.startswith('''modeling_''' )]
    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path ):
            with open(path ) as fp:
                modeling_sources.append(fp.read() )
    unused_attributes = []
    for config_param, default_value in zip(parameter_names , parameter_defaults ):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param] )
        if not check_attribute_being_used(config_class , attributes , default_value , modeling_sources ):
            unused_attributes.append(attributes[0] )
    return sorted(unused_attributes )
def check_config_attributes():
    """Raise if any configuration class declares attributes its modeling files never use."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class ) , lambda x : inspect.isclass(x )
                and issubclass(x , PretrainedConfig )
                and inspect.getmodule(x ) == inspect.getmodule(_config_class ) , )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class )
            if len(unused_attributes ) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes
    if len(configs_with_unused_attributes ) > 0:
        error = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
        for name, attributes in configs_with_unused_attributes.items():
            error += F'''{name}: {attributes}\n'''
        raise ValueError(error )
if __name__ == "__main__":
check_config_attributes()
| 75 |
def odd_even_transposition( arr : list ):
    '''Odd-even transposition (brick) sort: alternate compare-and-swap passes over even and odd index pairs.'''
    arr_size = len(arr )
    for _ in range(arr_size ):
        for i in range(_ % 2 , arr_size - 1 , 2 ):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
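# Illustration (not in the original file): phases alternate between even-index
# pairs (0,1), (2,3), ... and odd-index pairs (1,2), (3,4), ...
# e.g. odd_even_transposition([3, 1, 2]) proceeds
#   even phase: [1, 3, 2] -> odd phase: [1, 2, 3]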
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
print(f"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 12 | 0 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key ):
    def decorator(func ):
        handle = getattr(func , "handle_key" , [])
        handle += [key]
        setattr(func , "handle_key" , handle)
        return func
    return decorator
def mark_multiple(*keys ):
    def decorator(func ):
        handle = getattr(func , "handle_key" , [])
        handle += keys
        setattr(func , "handle_key" , handle)
        return func
    return decorator
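# Usage sketch (illustrative, not part of the original module): methods on a
# menu class can be marked so the KeyHandler metaclass below dispatches key
# presses to them, e.g.
#
#   @mark("j")
#   def move_down(cls): ...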
class KeyHandler(type ):
    '''Metaclass that collects `handle_key`-marked methods into a `key_handler` dispatch table.'''
    def __new__( cls , name , bases , attrs ):
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , "key_handler" ):
            setattr(new_cls , "key_handler" , {} )
        setattr(new_cls , "handle_input" , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , "handle_key" , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input( cls ):
        """Finds and returns the selected character if it exists in the handler"""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None
def register(cls ):
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy()) | 76 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class lowerCamelCase__:
    def __init__( self , question_encoder , generator ):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder
    def save_pretrained( self , save_directory ):
        if os.path.isfile(save_directory ):
            raise ValueError(F'Provided path ({save_directory}) should be a directory, not a file' )
        os.makedirs(save_directory , exist_ok=True )
        question_encoder_path = os.path.join(save_directory , """question_encoder_tokenizer""" )
        generator_path = os.path.join(save_directory , """generator_tokenizer""" )
        self.question_encoder.save_pretrained(question_encoder_path )
        self.generator.save_pretrained(generator_path )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer
        config = kwargs.pop("""config""" , None )
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path )
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.generator , subfolder="""generator_tokenizer""" )
        return cls(question_encoder=question_encoder , generator=generator )
    def __call__( self , *args , **kwargs ):
        return self.current_tokenizer(*args , **kwargs )
    def batch_decode( self , *args , **kwargs ):
        return self.generator.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.generator.decode(*args , **kwargs )
    def _switch_to_input_mode( self ):
        self.current_tokenizer = self.question_encoder
    def _switch_to_target_mode( self ):
        self.current_tokenizer = self.generator
    def prepare_seq2seq_batch( self , src_texts: List[str] , tgt_texts: Optional[List[str]] = None , max_length: Optional[int] = None , max_target_length: Optional[int] = None , padding: str = "longest" , return_tensors: str = None , truncation: bool = True , **kwargs , ) -> BatchEncoding:
        warnings.warn(
            """`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
            """regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
            """context manager to prepare your targets. See the documentation of your specific tokenizer for more """
            """details""" , FutureWarning , )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts , add_special_tokens=True , return_tensors=return_tensors , max_length=max_length , padding=padding , truncation=truncation , **kwargs , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts , add_special_tokens=True , return_tensors=return_tensors , padding=padding , max_length=max_target_length , truncation=truncation , **kwargs , )
        model_inputs["""labels"""] = labels["""input_ids"""]
        return model_inputs
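# Usage sketch (illustrative; "facebook/rag-token-nq" is the canonical RAG
# checkpoint, and the class above plays the role of transformers' RagTokenizer):
#   tok = lowerCamelCase__.from_pretrained("facebook/rag-token-nq")
#   batch = tok.prepare_seq2seq_batch(["who wrote hamlet?"], tgt_texts=["shakespeare"], return_tensors="pt")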
| 12 | 0 |
"""simple docstring"""
from math import log2


def a_ ( a : int ):
    '''Return the index of the lowest set bit of `a` (0 for a == 0).'''
    if a < 0:
        raise ValueError('Input value must be a positive integer' )
    elif isinstance(a , float ):
        raise TypeError('Input value must be a \'int\' type' )
    return 0 if (a == 0) else int(log2(a & -a ) )
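# Example (not in the original file): a_(12) == 2, since 12 == 0b1100,
# 12 & -12 == 4 isolates the lowest set bit, and log2(4) == 2.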
if __name__ == "__main__":
import doctest
doctest.testmod()
| 77 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
payload = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': f"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'emoji': True,
},
}
]
total_num_failed = 0
for log in Path().glob('*.log'):
    section_num_failed = 0
    with open(log, 'r') as f:
        for line in f:
            line = json.loads(line)
            if line.get('nodeid', '') != "":
                test = line['nodeid']
                if line.get('duration', None) is not None:
                    duration = f"""{line["duration"]:.4f}"""
                    if line.get('outcome', '') == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split('_')[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
message = ''
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split('::')
                data[0] = data[0].split('/')[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_000:
        err = 'Too many failed tests, please see the full report in the Action results.'
        offset = len(err) + 10
        message = message[: 3_000 - offset] + f"""\n...\n```\n{err}"""
print(f"""### {message}""")
else:
    message = 'No failed tests! 🤗'
print(f"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
        md_report = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
        action_button = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': f"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
    date_report = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': f"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
    response = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
    ts = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
            test_class = ''
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ''
            payload = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
| 12 | 0 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _lowerCAmelCase ( lowercase_ = "isbn/0140328726" ):
UpperCAmelCase = olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('/' ) != 1:
UpperCAmelCase = F"""{olid} is not a valid Open Library olid"""
raise ValueError(lowercase_ )
return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json()
def summarize_book( ol_book_data: dict ) -> dict:
    desired_keys = {
        'title': 'Title',
        'publish_date': 'Publish date',
        'authors': 'Authors',
        'number_of_pages': 'Number of pages:',
        'first_sentence': 'First sentence',
        'isbn_10': 'ISBN (10)',
        'isbn_13': 'ISBN (13)',
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data['Authors'] = [
        get_openlibrary_data(author['key'] )['name'] for author in data['Authors']
    ]
    data['First sentence'] = data['First sentence']['value']
    for key, value in data.items():
        if isinstance(value , list ):
            data[key] = ', '.join(value )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
snake_case_ = input("""\nEnter the ISBN code to search (or 'quit' to stop): """).strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
snake_case_ = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
print("""\n""".join(f'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 78 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin , ConfigMixin):
    @register_to_config
    def __init__( self , learnable: bool , hidden_size: Optional[int] = None , length: Optional[int] = None ):
        super().__init__()
        self.learnable = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length , hidden_size )
        else:
            embeddings = None
        self.embeddings = torch.nn.Parameter(embeddings )
class lowerCamelCase__( DiffusionPipeline):
    vqvae : VQModel
    text_encoder : CLIPTextModel
    tokenizer : CLIPTokenizer
    transformer : TransformeraDModel
    learned_classifier_free_sampling_embeddings : LearnedClassifierFreeSamplingEmbeddings
    scheduler : VQDiffusionScheduler
    def __init__( self , vqvae: VQModel , text_encoder: CLIPTextModel , tokenizer: CLIPTokenizer , transformer: TransformeraDModel , scheduler: VQDiffusionScheduler , learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings , ):
        super().__init__()
        self.register_modules(
            vqvae=vqvae , transformer=transformer , text_encoder=text_encoder , tokenizer=tokenizer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
    def _encode_prompt( self , prompt , num_images_per_prompt , do_classifier_free_guidance ):
        batch_size = len(prompt ) if isinstance(prompt , list ) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=True )
        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0 ).repeat(batch_size , 1 , 1 )
            else:
                uncond_tokens = [""""""] * batch_size
                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens , padding="""max_length""" , max_length=max_length , truncation=True , return_tensors="""pt""" , )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=True )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1 , num_images_per_prompt , 1 )
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt , seq_len , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds] )
        return prompt_embeds
    @torch.no_grad()
    def __call__( self , prompt: Union[str, List[str]] , num_inference_steps: int = 100 , guidance_scale: float = 5.0 , truncation_rate: float = 1.0 , num_images_per_prompt: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , ):
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(prompt )}' )
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds = self._encode_prompt(prompt , num_images_per_prompt , do_classifier_free_guidance )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
                F' {type(callback_steps )}.' )
        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape , mask_class ).to(self.device )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    """Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
                    F' {self.transformer.num_vector_embeds - 1} (inclusive).' )
            latents = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
        sample = latents
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input , encoder_hidden_states=prompt_embeds , timestep=t ).sample
            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2 )
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output , dim=1 , keepdim=True )
            model_output = self.truncate(model_output , truncation_rate )
            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70 )
            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output , timestep=t , sample=sample , generator=generator ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , t , sample )
        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample , shape=embeddings_shape )
        image = self.vqvae.decode(embeddings , force_not_quantize=True ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
    def truncate( self , log_p_x_0: torch.FloatTensor , truncation_rate: float ):
        '''Zero out (set to log(0) = -inf) the low-probability classes whose cumulative probability exceeds `truncation_rate`.'''
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0 , 1 , descending=True )
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0 )
        keep_mask = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :] , True )
        keep_mask = torch.cat((all_true, keep_mask) , dim=1 )
        keep_mask = keep_mask[:, :-1, :]
        keep_mask = keep_mask.gather(1 , indices.argsort(1 ) )
        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)
        return rv
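# Illustration of `truncate` (not in the original file): with per-pixel class
# probabilities (0.6, 0.3, 0.1) and truncation_rate=0.8, the cumulative sums
# are (0.6, 0.9, 1.0); after prepending an always-True entry and shifting the
# mask, the top two classes are kept and the 0.1 class is set to log(0) = -inf.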
| 12 | 0 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json''',
}
class _UpperCAmelCase ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''mvp'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self , vocab_size=50267 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , activation_function="gelu" , d_model=1024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , classifier_dropout=0.0 , scale_embedding=False , use_cache=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , is_encoder_decoder=True , decoder_start_token_id=2 , forced_eos_token_id=2 , use_prompt=False , prompt_length=100 , prompt_mid_dim=800 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , False ):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
                "The config can simply be saved and uploaded again to be fixed." )
| 79 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class lowerCamelCase__( BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
    @slow
    def test_sequence_builders( self ):
        tokenizer = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
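        # i.e. the special-token layout being asserted for DistilBERT is:
        #   single sequence: [CLS] A [SEP]
        #   sequence pair:   [CLS] A [SEP] B [SEP]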
| 12 | 0 |
'''simple docstring'''
import json
import sys
def format_json_to_md( input_json_file , output_md_file ) -> None:
    '''simple docstring'''
    with open(input_json_file , encoding="utf-8" ) as f:
        results = json.load(f )
    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
    for benchmark_name in sorted(results ):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/" )[-1]
        output_md.append(F'''### Benchmark: {benchmark_file_name}''' )
        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res ):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old" , None )
            dif_val = metric_vals.get("diff" , None )
            val_str = F''' {new_val:f}''' if isinstance(new_val , (int, float) ) else "None"
            if old_val is not None:
                val_str += F''' / {old_val:f}''' if isinstance(old_val , (int, float) ) else "None"
            if dif_val is not None:
                val_str += F''' ({dif_val:f})''' if isinstance(dif_val , (int, float) ) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append("</details>" )
    with open(output_md_file , "w" , encoding="utf-8" ) as f:
        f.writelines("\n".join(output_md ) )
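# Example (illustrative input, not from the repo): a results file containing
#   {"benchmarks/bench_load.json": {"read": {"new": 1.5, "old": 2.0, "diff": -0.5}}}
# renders a "### Benchmark: bench_load.json" section with a "| metric | read |"
# table whose value cell reads " 1.500000 / 2.000000 (-0.500000)".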
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 80 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator: Accelerator , batch_size: int = 16 , model_name: str = "bert-base-cased" ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset("""glue""" , """mrpc""" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
        return tokenizer.pad(examples , padding="""longest""" , return_tensors="""pt""" )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
def evaluation_loop( accelerator: Accelerator , model , eval_dataloader , metric ):
    '''simple docstring'''
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader ):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device )
        with torch.no_grad():
            outputs = model(**batch )
        predictions = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["""labels"""]) )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader ) - 1:
                predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                references = references[: len(eval_dataloader.dataset ) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions , references=references , )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function( config , args ):
    '''simple docstring'''
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    model_name = args.model_name_or_path
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            """gradient_accumulation_steps"""
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("""glue""" , """mrpc""" )
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint )
        epoch_string = args.resume_from_checkpoint.split("""epoch_""" )[1]
        state_epoch_num = """"""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num ) + 1
        accuracy = evaluation_loop(accelerator , model , eval_dataloader , metric )
        accelerator.print("""resumed checkpoint performance:""" , accuracy )
        accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
        accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
        with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , """r""" ) as f:
            resumed_state = json.load(f )
            assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
            assert (
                resumed_state["lr"] == lr_scheduler.get_lr()[0]
            ), "Scheduler learning rate mismatch, loading from checkpoint failed"
            assert (
                resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
            ), "Optimizer learning rate mismatch, loading from checkpoint failed"
            assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
            return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch , ending_epoch ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = f'epoch_{epoch}'
        output_dir = os.path.join(args.output_dir , output_dir )
        accelerator.save_state(output_dir )
        accuracy = evaluation_loop(accelerator , model , eval_dataloader , metric )
        state["""accuracy"""] = accuracy
        state["""lr"""] = lr_scheduler.get_lr()[0]
        state["""optimizer_lr"""] = optimizer.param_groups[0]["""lr"""]
        state["""epoch"""] = epoch
        state["""overall_step"""] = overall_step
        accelerator.print(f'epoch {epoch}:' , state )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , """w""" ) as f:
                json.dump(state , f )
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=A__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=A__ , )
parser.add_argument(
"""--output_dir""" , type=A__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=A__ , default=A__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=A__ , default=A__ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=A__ , default=2 , help="""Number of train epochs.""" , )
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(A__ , A__ )
if __name__ == "__main__":
main()
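
# Illustrative sketch (not from the original script): a plain-Python mock of how
# `evaluation_loop` trims the duplicated samples that `accelerator.gather` pads into
# the final distributed batch. All sizes here are made up for the demo.
def _trim_last_batch_demo() -> None:
    dataset_len = 10
    gathered = [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 8, 9]]  # last batch padded with duplicates
    samples_seen = 0
    collected = []
    for step, references in enumerate(gathered):
        if step == len(gathered) - 1:
            references = references[: dataset_len - samples_seen]
        else:
            samples_seen += len(references)
        collected.extend(references)
    assert collected == list(range(10))


if __name__ == "__main__":
    _trim_last_batch_demo()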
| 12 | 0 |
"""simple docstring"""
import cva
import numpy as np
class __A :
"""simple docstring"""
def __init__( self , __A , __A ) -> int:
if k in (0.04, 0.06):
a =k
a =window_size
else:
raise ValueError('''invalid k value''' )
def __str__( self ) -> str:
return str(self.k )
def SCREAMING_SNAKE_CASE ( self , __A ) -> tuple[cva.Mat, list[list[int]]]:
a =cva.imread(__A , 0 )
a , a =img.shape
a =[]
a =img.copy()
a =cva.cvtColor(__A , cva.COLOR_GRAY2RGB )
a , a =np.gradient(__A )
a =dx**2
a =dy**2
a =dx * dy
a =0.04
a =self.window_size // 2
for y in range(__A , h - offset ):
for x in range(__A , w - offset ):
a =ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
a =iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
a =ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
a =(wxx * wyy) - (wxy**2)
a =wxx + wyy
a =det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
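

# Illustrative sketch (not from the original file): the structure-tensor arithmetic
# used in `detect`, checked on a synthetic 10x10 image with one bright square. The
# window size and k below are arbitrary demo choices.
if __name__ == "__main__":
    _demo = np.zeros((10, 10))
    _demo[:5, :5] = 255.0  # bright square whose bottom-right corner is near (4, 4)
    _dy, _dx = np.gradient(_demo)
    _ixx, _iyy, _ixy = _dx**2, _dy**2, _dx * _dy

    def _response(y: int, x: int, offset: int = 1, k: float = 0.04) -> float:
        window = (slice(y - offset, y + offset + 1), slice(x - offset, x + offset + 1))
        wxx, wyy, wxy = _ixx[window].sum(), _iyy[window].sum(), _ixy[window].sum()
        return (wxx * wyy - wxy**2) - k * (wxx + wyy) ** 2

    assert _response(4, 4) > _response(8, 8)  # the corner scores higher than a flat region
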
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 81 |
import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402


SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
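

# Illustrative sketch (not from the original test file): the registration API that the
# tests above exercise, used directly. `_extra_content` is the same private store the
# tests themselves inspect.
if __name__ == "__main__":
    AutoConfig.register("custom", CustomConfig)
    AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
    assert FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] is CustomFeatureExtractor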
| 12 | 0 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    """Output class for the scheduler's `step` function."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1-beta) over time from t = [0,1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    """A modified DDPM scheduler used by the unCLIP (karlo) model."""

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """No-op: this scheduler does not scale model inputs."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Set the discrete timesteps used for the diffusion chain, spaced across the training range."""
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        """Predict the sample at the previous timestep by reversing the SDE."""
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        """Diffuse `original_samples` forward to the given `timesteps`."""
        # Make sure alphas_cumprod and timesteps have the same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
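

# Illustrative sketch (not from the original module): one reverse diffusion step with
# this scheduler, using random tensors in place of a real UNet output. The block is
# inert in practice because the relative imports above prevent running this file
# directly.
if __name__ == "__main__":
    scheduler = UnCLIPScheduler(variance_type="fixed_small_log")
    scheduler.set_timesteps(25)
    sample = torch.randn(1, 3, 64, 64)
    t = scheduler.timesteps[0]
    model_output = torch.randn(1, 3, 64, 64)  # a real pipeline would call its UNet here
    sample = scheduler.step(model_output, t, sample, generator=torch.manual_seed(0)).prev_sample
    assert sample.shape == (1, 3, 64, 64)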
| 82 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile

from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version


logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_local = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
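

# Illustrative sketch (not from the original module): how a source URL is mapped to
# the file name looked up inside dummy_data.zip - the last path component is kept and
# any query string is percent-encoded into the name. Inert on package import; the
# relative imports above prevent running this file directly.
if __name__ == "__main__":
    url = "https://example.com/files/train.csv?version=2"
    print(os.path.join("dummy_data", urllib.parse.quote_plus(url.split("/")[-1])))
    # -> dummy_data/train.csv%3Fversion%3D2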
| 12 | 0 |
"""Convert Audio Spectrogram Transformer checkpoints from the original repository. URL: https://github.com/YuanGongND/ast"""


import argparse
import json
from pathlib import Path

import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download

from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")

    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights into our Audio Spectrogram Transformer structure.
    """
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )

        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="ast-finetuned-audioset-10-10-0.4593",
        type=str,
        help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
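

if __name__ == "__main__":
    # Illustrative sketch (not from the original script): the qkv split performed in
    # `convert_state_dict`, shown on a tiny random tensor with a hypothetical hidden size of 4.
    _dim = 4
    _qkv_weight = torch.randn(3 * _dim, _dim)  # fused [query; key; value] projection
    _q = _qkv_weight[:_dim, :]
    _k = _qkv_weight[_dim : _dim * 2, :]
    _v = _qkv_weight[-_dim:, :]
    assert torch.equal(torch.cat([_q, _k, _v]), _qkv_weight)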
| 83 |
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """
    >>> boards = []
    >>> depth_first_search([], [], [], boards, 4)
    >>> for board in boards:
    ...     print(board)
    ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
    ['. . Q . ', 'Q . . . ', '. . . Q ', '. Q . . ']
    """
    # Get the next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that in the current board
        # (possible_board) there is no other queen in the same column, because if there
        # is it means there is a collision in the vertical. Then we apply the two
        # formulas we learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not exist in their
        # respective variables (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these is True it means there is a collision, so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
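    # Illustrative sketch (not from the original file): the two diagonal labels used
    # above - squares on the same "\" diagonal share row - col, and squares on the
    # same "/" diagonal share row + col.
    assert {(r, c) for r in range(4) for c in range(4) if r - c == 1} == {(1, 0), (2, 1), (3, 2)}
    assert {(r, c) for r in range(4) for c in range(4) if r + c == 3} == {(0, 3), (1, 2), (2, 1), (3, 0)}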
| 12 | 0 |
"""simple docstring"""
from __future__ import annotations
def _snake_case ( lowercase__ : str , lowercase__ : list[str] | None = None ) -> list[list[str]]:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = word_bank or []
# create a table
lowerCAmelCase_ :int = len(lowercase__ ) + 1
lowerCAmelCase_ :list[list[list[str]]] = []
for _ in range(lowercase__ ):
table.append([] )
# seed value
lowerCAmelCase_ :Optional[Any] = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(lowercase__ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(lowercase__ )] == word:
lowerCAmelCase_ :list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(lowercase__ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(lowercase__ )]:
combination.reverse()
return table[len(lowercase__ )]
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
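    # Illustrative sketch (not from the original file): a case small enough to check
    # by hand - "abc" can be built letter by letter or as "ab" + "c".
    assert sorted(all_construct("abc", ["a", "b", "c", "ab"])) == [["a", "b", "c"], ["ab", "c"]]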
| 84 |
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """
    Given a binary tree with n nodes and n total coins spread over its `data` fields,
    return the minimum number of moves needed so that every node holds exactly one
    coin (one move transfers a coin between adjacent nodes).
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
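    # Illustrative sketch (not from the original file): LeetCode 979's first example -
    # the root holds all 3 coins, each leaf holds 0, so exactly 2 moves are required.
    example = TreeNode(3, TreeNode(0), TreeNode(0))
    assert distribute_coins(example) == 2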
| 12 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 85 |
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
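

# Illustrative sketch (not from the original module): the padding arithmetic above
# always adds at least one pixel, so a dimension that is already a multiple of
# `pad_size` still grows by one full block. Inert on package import; the relative
# imports above prevent running this file directly.
if __name__ == "__main__":
    for old, size in [(65, 8), (48, 8)]:
        print(old, "->", (old // size + 1) * size)  # 65 -> 72, 48 -> 56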
| 12 | 0 |
"""simple docstring"""
from math import factorial, radians
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase = 18 , _UpperCamelCase = 10 ):
__lowerCAmelCase : List[Any] = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
__lowerCAmelCase : Dict = radians(_UpperCamelCase )
__lowerCAmelCase : Optional[Any] = angle_in_radians
__lowerCAmelCase : Optional[int] = 3
__lowerCAmelCase : Any = -1
for _ in range(_UpperCamelCase ):
result += (b * (angle_in_radians**a)) / factorial(_UpperCamelCase )
__lowerCAmelCase : Union[str, Any] = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(_UpperCamelCase , _UpperCamelCase )
if __name__ == "__main__":
__import__("""doctest""").testmod() | 86 |
from __future__ import annotations


def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """
    Divide and conquer: return the maximum of nums[left] ... nums[right], inclusive.

    >>> find_max([3, 2, 1], 0, 2)
    3
    """
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]

    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
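    # Illustrative sketch (not from the original file): the usual call spans the whole
    # list, i.e. left=0 and right=len(nums) - 1.
    numbers = [3, 7, -2, 9, 4]
    assert find_max(numbers, 0, len(numbers) - 1) == 9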
| 12 | 0 |
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    load_numpy,
    nightly,
    require_torch_gpu,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64),
            in_channels=3,
            out_channels=3,
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3


@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
| 87 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowerCamelCase__( __lowerCamelCase):
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = SMALL_MODEL_IDENTIFIER
__lowerCamelCase = """pt"""
__lowerCamelCase = """tf"""
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = TFAutoModel.from_pretrained(self.test_model , from_pt=UpperCamelCase_ )
model_tf.save_pretrained(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = """mock_framework"""
# Framework provided - return whatever the user provides
__lowerCamelCase = FeaturesManager.determine_framework(self.test_model , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(UpperCamelCase_ )
__lowerCamelCase = FeaturesManager.determine_framework(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(UpperCamelCase_ )
__lowerCamelCase = FeaturesManager.determine_framework(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: int ):
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(UpperCamelCase_ )
__lowerCamelCase = FeaturesManager.determine_framework(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(UpperCamelCase_ )
__lowerCamelCase = FeaturesManager.determine_framework(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = FeaturesManager.determine_framework(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = MagicMock(return_value=UpperCamelCase_ )
with patch("""transformers.onnx.features.is_tf_available""" , UpperCamelCase_ ):
__lowerCamelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCamelCase_ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
__lowerCamelCase = MagicMock(return_value=UpperCamelCase_ )
with patch("""transformers.onnx.features.is_torch_available""" , UpperCamelCase_ ):
__lowerCamelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCamelCase_ , self.framework_tf )
# Both in environment -> use PyTorch
__lowerCamelCase = MagicMock(return_value=UpperCamelCase_ )
__lowerCamelCase = MagicMock(return_value=UpperCamelCase_ )
with patch("""transformers.onnx.features.is_tf_available""" , UpperCamelCase_ ), patch(
"""transformers.onnx.features.is_torch_available""" , UpperCamelCase_ ):
__lowerCamelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCamelCase_ , self.framework_pt )
# Both not in environment -> raise error
__lowerCamelCase = MagicMock(return_value=UpperCamelCase_ )
__lowerCamelCase = MagicMock(return_value=UpperCamelCase_ )
with patch("""transformers.onnx.features.is_tf_available""" , UpperCamelCase_ ), patch(
"""transformers.onnx.features.is_torch_available""" , UpperCamelCase_ ):
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = FeaturesManager.determine_framework(self.test_model )
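# Hedged usage sketch for the API the tests above exercise: determine_framework
# returns the framework argument when one is passed, inspects checkpoint files
# for local paths, and otherwise falls back to whichever of PyTorch/TensorFlow
# is importable, preferring PyTorch when both are. The model name below is a
# placeholder.
from transformers.onnx import FeaturesManager

framework = FeaturesManager.determine_framework("bert-base-cased")
print(framework)  # "pt" when torch is installed, otherwise "tf"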
| 12 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__lowerCAmelCase : List[Any] = {
'configuration_gpt_neox_japanese': ['GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXJapaneseConfig'],
'tokenization_gpt_neox_japanese': ['GPTNeoXJapaneseTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Tuple = [
'GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXJapaneseForCausalLM',
'GPTNeoXJapaneseLayer',
'GPTNeoXJapaneseModel',
'GPTNeoXJapanesePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 88 |
from __future__ import annotations
from PIL import Image
# Define glider example
UpperCAmelCase_ = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
UpperCAmelCase_ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def lowerCamelCase__ ( A__ : list[list[int]] ):
'''simple docstring'''
__lowerCamelCase = []
for i in range(len(A__ ) ):
__lowerCamelCase = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
__lowerCamelCase = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(A__ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(A__ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(A__ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
__lowerCamelCase = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(A__ )
return next_generation
def lowerCamelCase__ ( A__ : list[list[int]] , A__ : int ):
'''simple docstring'''
__lowerCamelCase = []
for _ in range(A__ ):
# Create output image
__lowerCamelCase = Image.new("""RGB""" , (len(cells[0] ), len(A__ )) )
__lowerCamelCase = img.load()
# Save cells to image
for x in range(len(A__ ) ):
for y in range(len(cells[0] ) ):
__lowerCamelCase = 255 - cells[y][x] * 255
__lowerCamelCase = (colour, colour, colour)
# Save image
images.append(A__ )
__lowerCamelCase = new_generation(A__ )
return images
if __name__ == "__main__":
UpperCAmelCase_ = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
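# Worked example of one step of the rules implemented above, written standalone
# because the local names in this snippet were mangled; it mirrors the same
# neighbour count and survival/birth conditions. A vertical blinker becomes a
# horizontal one after a single generation.
def step(cells: list[list[int]]) -> list[list[int]]:
    rows, cols = len(cells), len(cells[0])
    result = []
    for i in range(rows):
        row = []
        for j in range(cols):
            # count live neighbours inside the board
            n = sum(
                cells[x][y]
                for x in range(max(0, i - 1), min(rows, i + 2))
                for y in range(max(0, j - 1), min(cols, j + 2))
                if (x, y) != (i, j)
            )
            alive = cells[i][j] == 1
            row.append(1 if (alive and 2 <= n <= 3) or (not alive and n == 3) else 0)
        result.append(row)
    return result

assert step([[0, 1, 0], [0, 1, 0], [0, 1, 0]]) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]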
| 12 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 89 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Union[str, Any] = StableDiffusionInpaintPipeline
UpperCAmelCase__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
UpperCAmelCase__ : str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase__ : int = frozenset(
[]) # TODO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCAmelCase__ : Union[str, Any] = frozenset([])
def lowerCAmelCase__ ( self: str ):
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase_ , )
__lowerCamelCase = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
torch.manual_seed(0 )
__lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
__lowerCamelCase = CLIPTextModel(UpperCamelCase_ )
__lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCamelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCamelCase = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert("""RGB""" ).resize((64, 64) )
__lowerCamelCase = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionInpaintPipeline(**UpperCamelCase_ )
__lowerCamelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
__lowerCamelCase = sd_pipe(**UpperCamelCase_ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self: int ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase_ , safety_checker=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type="""np""" , )
__lowerCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
UpperCamelCase_ , torch_dtype=torch.floataa , safety_checker=UpperCamelCase_ , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type="""np""" , )
__lowerCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def lowerCAmelCase__ ( self: int ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = PNDMScheduler.from_pretrained(UpperCamelCase_ , subfolder="""scheduler""" )
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
UpperCamelCase_ , safety_checker=UpperCamelCase_ , scheduler=UpperCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=2 , output_type="""np""" , )
__lowerCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
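# Condensed, hedged sketch of the pipeline call the slow tests above exercise;
# it assumes a CUDA device, network access, and the same checkpoint and assets
# used in the tests.
import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
).to("cuda")
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/init_image.png"
)
mask_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)
image = pipe(
    prompt="Face of a yellow cat, high resolution, sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
).images[0]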
| 12 | 0 |
from __future__ import annotations
def lowerCamelCase_ ( UpperCamelCase__ : list[int] ) -> list[int]:
"""simple docstring"""
if len(UpperCamelCase__ ) == 0:
return array
__lowerCamelCase , __lowerCamelCase = min(UpperCamelCase__ ), max(UpperCamelCase__ )
# Compute the variables
__lowerCamelCase = _max - _min + 1
__lowerCamelCase , __lowerCamelCase = [0] * holes_range, [0] * holes_range
# Make the sorting.
for i in array:
__lowerCamelCase = i - _min
__lowerCamelCase = i
holes_repeat[index] += 1
# Makes the array back by replacing the numbers.
__lowerCamelCase = 0
for i in range(UpperCamelCase__ ):
while holes_repeat[i] > 0:
__lowerCamelCase = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
if __name__ == "__main__":
import doctest
doctest.testmod()
__A = input("Enter numbers separated by comma:\n")
__A = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
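# Standalone sketch of the same pigeonhole idea (the local assignments above
# were mangled by the style transform, so this restates them with plain names):
def pigeonhole_sort(array: list[int]) -> list[int]:
    if not array:
        return array
    _min, _max = min(array), max(array)
    size = _max - _min + 1
    holes = [0] * size    # last value seen per hole
    counts = [0] * size   # occurrences per hole
    for value in array:
        holes[value - _min] = value
        counts[value - _min] += 1
    index = 0
    for hole in range(size):
        while counts[hole] > 0:
            array[index] = holes[hole]
            index += 1
            counts[hole] -= 1
    return array

assert pigeonhole_sort([8, 3, 2, 7, 4, 6, 8]) == [2, 3, 4, 6, 7, 8, 8]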
| 90 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
UpperCAmelCase_ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 12 | 0 |
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = TypeVar("""DatasetType""", Dataset, IterableDataset)
def _A (__a , __a = None , __a = None , __a = None , __a = None , __a = "first_exhausted" , ) -> DatasetType:
"""simple docstring"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('''Unable to interleave an empty list of datasets.''' )
for i, dataset in enumerate(__a ):
if not isinstance(__a , (Dataset, IterableDataset) ):
if isinstance(__a , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'''is an empty dataset dictionary.''' )
raise ValueError(
f'Dataset at position {i} has at least one split: {list(__a )}\n'
f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(__a ) )}\']' )
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__a ).__name__}.' )
if i == 0:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
(Dataset, IterableDataset) if isinstance(__a , __a ) else (IterableDataset, Dataset)
)
elif not isinstance(__a , __a ):
raise ValueError(
f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
__a , __a , __a , info=__a , split=__a , stopping_strategy=__a )
else:
return _interleave_iterable_datasets(
__a , __a , __a , info=__a , split=__a , stopping_strategy=__a )
def _A (__a , __a = None , __a = None , __a = 0 , ) -> DatasetType:
"""simple docstring"""
if not dsets:
raise ValueError('''Unable to concatenate an empty list of datasets.''' )
for i, dataset in enumerate(__a ):
if not isinstance(__a , (Dataset, IterableDataset) ):
if isinstance(__a , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'''is an empty dataset dictionary.''' )
raise ValueError(
f'Dataset at position {i} has at least one split: {list(__a )}\n'
f'Please pick one to concatenate with the other datasets, for example: dataset[\'{next(iter(__a ) )}\']' )
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__a ).__name__}.' )
if i == 0:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = (
(Dataset, IterableDataset) if isinstance(__a , __a ) else (IterableDataset, Dataset)
)
elif not isinstance(__a , __a ):
raise ValueError(
f'Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(__a , info=__a , split=__a , axis=__a )
else:
return _concatenate_iterable_datasets(__a , info=__a , split=__a , axis=__a )
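# Hedged usage sketch of the public wrappers around these helpers
# (interleave_datasets / concatenate_datasets from the datasets package). With
# no probabilities given, interleaving cycles through the sources row by row.
from datasets import Dataset, concatenate_datasets, interleave_datasets

d1 = Dataset.from_dict({"a": [0, 1, 2]})
d2 = Dataset.from_dict({"a": [10, 11, 12]})

mixed = interleave_datasets([d1, d2], stopping_strategy="all_exhausted")
joined = concatenate_datasets([d1, d2])
print(mixed["a"])   # [0, 10, 1, 11, 2, 12]
print(joined["a"])  # [0, 1, 2, 10, 11, 12]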
| 91 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'tensor(bool)': np.bool_,
'tensor(int8)': np.inta,
'tensor(uint8)': np.uinta,
'tensor(int16)': np.intaa,
'tensor(uint16)': np.uintaa,
'tensor(int32)': np.intaa,
'tensor(uint32)': np.uintaa,
'tensor(int64)': np.intaa,
'tensor(uint64)': np.uintaa,
'tensor(float16)': np.floataa,
'tensor(float)': np.floataa,
'tensor(double)': np.floataa,
}
class lowerCamelCase__:
def __init__( self: str , UpperCamelCase_: List[Any]=None , **UpperCamelCase_: str ):
logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
__lowerCamelCase = model
__lowerCamelCase = kwargs.get("""model_save_dir""" , UpperCamelCase_ )
__lowerCamelCase = kwargs.get("""latest_model_name""" , UpperCamelCase_ )
def __call__( self: Dict , **UpperCamelCase_: Any ):
__lowerCamelCase = {k: np.array(UpperCamelCase_ ) for k, v in kwargs.items()}
return self.model.run(UpperCamelCase_ , UpperCamelCase_ )
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_: Union[str, Path] , UpperCamelCase_: Tuple=None , UpperCamelCase_: Tuple=None ):
if provider is None:
logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
__lowerCamelCase = """CPUExecutionProvider"""
return ort.InferenceSession(UpperCamelCase_ , providers=[provider] , sess_options=UpperCamelCase_ )
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Union[str, Path] , UpperCamelCase_: Optional[str] = None , **UpperCamelCase_: Optional[int] ):
__lowerCamelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
__lowerCamelCase = self.model_save_dir.joinpath(self.latest_model_name )
__lowerCamelCase = Path(UpperCamelCase_ ).joinpath(UpperCamelCase_ )
try:
shutil.copyfile(UpperCamelCase_ , UpperCamelCase_ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
__lowerCamelCase = self.model_save_dir.joinpath(UpperCamelCase_ )
if src_path.exists():
__lowerCamelCase = Path(UpperCamelCase_ ).joinpath(UpperCamelCase_ )
try:
shutil.copyfile(UpperCamelCase_ , UpperCamelCase_ )
except shutil.SameFileError:
pass
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Union[str, os.PathLike] , **UpperCamelCase_: Optional[Any] , ):
if os.path.isfile(UpperCamelCase_ ):
logger.error(F'Provided path ({save_directory}) should be a directory, not a file' )
return
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
# saving model weights/files
self._save_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: str , UpperCamelCase_: Union[str, Path] , UpperCamelCase_: Optional[Union[bool, str, None]] = None , UpperCamelCase_: Optional[Union[str, None]] = None , UpperCamelCase_: bool = False , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional["ort.SessionOptions"] = None , **UpperCamelCase_: int , ):
__lowerCamelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(UpperCamelCase_ ):
__lowerCamelCase = OnnxRuntimeModel.load_model(
os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , provider=UpperCamelCase_ , sess_options=UpperCamelCase_ )
__lowerCamelCase = Path(UpperCamelCase_ )
# load model from hub
else:
# download model
__lowerCamelCase = hf_hub_download(
repo_id=UpperCamelCase_ , filename=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , revision=UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , )
__lowerCamelCase = Path(UpperCamelCase_ ).parent
__lowerCamelCase = Path(UpperCamelCase_ ).name
__lowerCamelCase = OnnxRuntimeModel.load_model(UpperCamelCase_ , provider=UpperCamelCase_ , sess_options=UpperCamelCase_ )
return cls(model=UpperCamelCase_ , **UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: Optional[int] , UpperCamelCase_: Union[str, Path] , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional[str] = None , **UpperCamelCase_: int , ):
__lowerCamelCase = None
if len(str(UpperCamelCase_ ).split("""@""" ) ) == 2:
__lowerCamelCase, __lowerCamelCase = model_id.split("""@""" )
return cls._from_pretrained(
model_id=UpperCamelCase_ , revision=UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , **UpperCamelCase_ , )
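# Hedged usage sketch for the wrapper above. The directory path and the input
# name "sample" are placeholders (real input names depend on the exported
# graph); loading from a local directory defaults to ONNX_WEIGHTS_NAME
# ("model.onnx").
import numpy as np

model = OnnxRuntimeModel.from_pretrained("path/to/onnx_model_dir", provider="CPUExecutionProvider")
outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))  # list of output arrays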
| 12 | 0 |
def _a ( SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : int ):
__lowerCAmelCase = len(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
__lowerCAmelCase = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
__lowerCAmelCase = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
__lowerCAmelCase = subset[i - 1][j]
if arr[i - 1] <= j:
__lowerCAmelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
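# Worked example of the same bottom-up DP, written standalone because the table
# assignments above were name-mangled. dp[i][j] answers: can some subset of the
# first i elements sum to j?
def is_subset_sum(arr: list[int], required_sum: int) -> bool:
    n = len(arr)
    dp = [[False] * (required_sum + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True  # the empty subset always sums to 0
    for i in range(1, n + 1):
        for j in range(1, required_sum + 1):
            dp[i][j] = dp[i - 1][j] or (arr[i - 1] <= j and dp[i - 1][j - arr[i - 1]])
    return dp[n][required_sum]

assert is_subset_sum([3, 34, 4, 12, 5, 2], 9) is True   # 4 + 5
assert is_subset_sum([3, 34, 4, 12, 5, 2], 30) is False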
| 92 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase_ = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 12 | 0 |
'''simple docstring'''
def snake_case_ ( __SCREAMING_SNAKE_CASE : float ):
"""simple docstring"""
return 10 - x * x
def snake_case_ ( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ):
"""simple docstring"""
if equation(__SCREAMING_SNAKE_CASE ) * equation(__SCREAMING_SNAKE_CASE ) >= 0:
raise ValueError('''Wrong space!''' )
lowercase_ : Any = a
while (b - a) >= 0.01:
# Find middle point
lowercase_ : Dict = (a + b) / 2
# Check if middle point is root
if equation(__SCREAMING_SNAKE_CASE ) == 0.0:
break
# Decide the side to repeat the steps
if equation(__SCREAMING_SNAKE_CASE ) * equation(__SCREAMING_SNAKE_CASE ) < 0:
lowercase_ : Optional[Any] = c
else:
lowercase_ : List[Any] = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
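# Sanity check, assuming the mangled parameter names are restored so that
# `equation` and `bisection` (the names used by the internal calls and the
# __main__ block) run as intended: the positive root of 10 - x**2 is
# sqrt(10) ~= 3.1623, and the loop exits once the bracket is narrower than 0.01.
import math

assert abs(bisection(0, 6) - math.sqrt(10)) < 0.01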
| 93 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
UpperCAmelCase_ = get_logger(__name__)
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : str , A__ : Any , A__ : Dict , A__ : Any=0 ):
'''simple docstring'''
os.makedirs(A__ , exist_ok=A__ )
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__lowerCamelCase = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__lowerCamelCase = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
__lowerCamelCase = os.path.join(A__ , A__ )
if accelerator.process_index == 0:
logger.info(f'Saving model to {output_model_file}' )
torch.save(A__ , A__ )
logger.info(f'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__lowerCamelCase = (
f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Saving model to {output_model_file}' )
torch.save(A__ , A__ )
logger.info(f'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__lowerCamelCase = os.path.join(A__ , f'{MODEL_NAME}_{model_index}' )
os.makedirs(A__ , exist_ok=A__ )
logger.info(f'Saving model to {ckpt_dir}' )
__lowerCamelCase = {"""model""": state_dict}
dist_cp.save_state_dict(
state_dict=A__ , storage_writer=dist_cp.FileSystemWriter(A__ ) , planner=DefaultSavePlanner() , )
logger.info(f'Model saved to {ckpt_dir}' )
def lowerCamelCase__ ( A__ : int , A__ : Dict , A__ : int , A__ : List[str] , A__ : Any=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(A__ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"""Set the `sync_module_states` flag to `True` so that model states are synced across processes when """
"""initializing FSDP object""" )
return
__lowerCamelCase = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Loading model from {input_model_file}' )
__lowerCamelCase = torch.load(A__ )
logger.info(f'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__lowerCamelCase = (
f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Loading model from {input_model_file}' )
__lowerCamelCase = torch.load(A__ )
logger.info(f'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__lowerCamelCase = (
os.path.join(A__ , f'{MODEL_NAME}_{model_index}' )
if f'{MODEL_NAME}' not in input_dir
else input_dir
)
logger.info(f'Loading model from {ckpt_dir}' )
__lowerCamelCase = {"""model""": model.state_dict()}
dist_cp.load_state_dict(
state_dict=A__ , storage_reader=dist_cp.FileSystemReader(A__ ) , planner=DefaultLoadPlanner() , )
__lowerCamelCase = state_dict["""model"""]
logger.info(f'Model loaded from {ckpt_dir}' )
model.load_state_dict(A__ )
def lowerCamelCase__ ( A__ : List[str] , A__ : List[str] , A__ : str , A__ : Dict , A__ : Optional[Any] , A__ : Optional[int]=0 ):
'''simple docstring'''
os.makedirs(A__ , exist_ok=A__ )
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__lowerCamelCase = FSDP.optim_state_dict(A__ , A__ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__lowerCamelCase = (
f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Saving Optimizer state to {output_optimizer_file}' )
torch.save(A__ , A__ )
logger.info(f'Optimizer state saved in {output_optimizer_file}' )
else:
__lowerCamelCase = os.path.join(A__ , f'{OPTIMIZER_NAME}_{optimizer_index}' )
os.makedirs(A__ , exist_ok=A__ )
logger.info(f'Saving Optimizer state to {ckpt_dir}' )
dist_cp.save_state_dict(
state_dict={"""optimizer""": optim_state} , storage_writer=dist_cp.FileSystemWriter(A__ ) , planner=DefaultSavePlanner() , )
logger.info(f'Optimizer state saved in {ckpt_dir}' )
def lowerCamelCase__ ( A__ : int , A__ : List[str] , A__ : int , A__ : Any , A__ : Union[str, Any] , A__ : List[Any]=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__lowerCamelCase = None
# below check should work but currently it isn't working (mostly a PyTorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__lowerCamelCase = (
f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Loading Optimizer state from {input_optimizer_file}' )
__lowerCamelCase = torch.load(A__ )
logger.info(f'Optimizer state loaded from {input_optimizer_file}' )
else:
__lowerCamelCase = (
os.path.join(A__ , f'{OPTIMIZER_NAME}_{optimizer_index}' )
if f'{OPTIMIZER_NAME}' not in input_dir
else input_dir
)
logger.info(f'Loading Optimizer from {ckpt_dir}' )
__lowerCamelCase = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="""optimizer""" , storage_reader=dist_cp.FileSystemReader(A__ ) , )
__lowerCamelCase = optim_state["""optimizer"""]
logger.info(f'Optimizer loaded from {ckpt_dir}' )
__lowerCamelCase = FSDP.optim_state_dict_to_load(A__ , A__ , A__ )
optimizer.load_state_dict(A__ )
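# Hedged sketch of how these helpers are reached in practice: with FSDP enabled
# (for example via `accelerate config`), Accelerator.save_state/load_state
# dispatch to the model/optimizer save and load functions above. The tiny model
# here is a placeholder.
import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(8, 8)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)
accelerator.save_state("checkpoint_dir")
accelerator.load_state("checkpoint_dir")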
| 12 | 0 |
def __lowerCamelCase ( UpperCAmelCase_ : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
a :Any = set()
# Replace all the whitespace in our sentence
a :str = input_str.replace(''' ''' , '''''' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(UpperCAmelCase_ ) == 26
def __lowerCamelCase ( UpperCAmelCase_ : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
a :str = [False] * 26
for char in input_str:
if char.islower():
a :int = True
elif char.isupper():
a :List[str] = True
return all(UpperCAmelCase_ )
def __lowerCamelCase ( UpperCAmelCase_ : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def __lowerCamelCase ( ):
"""simple docstring"""
from timeit import timeit
a :str = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit('''is_pangram()''' , setup=UpperCAmelCase_ ) )
print(timeit('''is_pangram_faster()''' , setup=UpperCAmelCase_ ) )
print(timeit('''is_pangram_fastest()''' , setup=UpperCAmelCase_ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
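# Quick checks, assuming the three functions keep the names referenced in the
# timeit setup string above (is_pangram, is_pangram_faster, is_pangram_fastest);
# in this snippet they were mangled to a shared placeholder name.
assert is_pangram("The quick brown fox jumps over the lazy dog")
assert not is_pangram("The quick brown fox jumps over the dog")
assert is_pangram_fastest("Waltz, bad nymph, for quick jigs vex.")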
| 94 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Tuple = ShapEImgaImgPipeline
UpperCAmelCase__ : Optional[Any] = ['image']
UpperCAmelCase__ : int = ['image']
UpperCAmelCase__ : Any = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
UpperCAmelCase__ : int = False
@property
def lowerCAmelCase__ ( self: int ):
return 32
@property
def lowerCAmelCase__ ( self: List[str] ):
return 32
@property
def lowerCAmelCase__ ( self: Any ):
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self: Dict ):
return 8
@property
def lowerCAmelCase__ ( self: int ):
torch.manual_seed(0 )
__lowerCamelCase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__lowerCamelCase = CLIPVisionModel(UpperCamelCase_ )
return model
@property
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=UpperCamelCase_ , do_normalize=UpperCamelCase_ , do_resize=UpperCamelCase_ , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_24 , )
return image_processor
@property
def lowerCAmelCase__ ( self: Tuple ):
torch.manual_seed(0 )
__lowerCamelCase = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
__lowerCamelCase = PriorTransformer(**UpperCamelCase_ )
return model
@property
def lowerCAmelCase__ ( self: List[Any] ):
torch.manual_seed(0 )
__lowerCamelCase = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
__lowerCamelCase = ShapERenderer(**UpperCamelCase_ )
return model
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = self.dummy_prior
__lowerCamelCase = self.dummy_image_encoder
__lowerCamelCase = self.dummy_image_processor
__lowerCamelCase = self.dummy_renderer
__lowerCamelCase = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=10_24 , prediction_type="""sample""" , use_karras_sigmas=UpperCamelCase_ , clip_sample=UpperCamelCase_ , clip_sample_range=1.0 , )
__lowerCamelCase = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def lowerCAmelCase__ ( self: int , UpperCamelCase_: List[Any] , UpperCamelCase_: Dict=0 ):
__lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = {
"""image""": input_image,
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = """cpu"""
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**UpperCamelCase_ )
__lowerCamelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) )
__lowerCamelCase = output.images[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowerCamelCase = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self: List[str] ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = torch_device == """cpu"""
__lowerCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**UpperCamelCase_ )
__lowerCamelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = 1
__lowerCamelCase = 2
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
for key in inputs.keys():
if key in self.batch_params:
__lowerCamelCase = batch_size * [inputs[key]]
__lowerCamelCase = pipe(**UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_img2img_out.npy""" )
__lowerCamelCase = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
__lowerCamelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
__lowerCamelCase = pipe(
UpperCamelCase_ , generator=UpperCamelCase_ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
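# Condensed, hedged version of the slow test above (needs a GPU and network
# access). Upstream the mangled class name appears to correspond to
# diffusers' ShapEImg2ImgPipeline.
import torch
from diffusers import ShapEImg2ImgPipeline
from diffusers.utils import load_image

pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img").to("cuda")
image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/shap_e/corgi.png"
)
generator = torch.Generator(device="cuda").manual_seed(0)
images = pipe(
    image,
    generator=generator,
    guidance_scale=3.0,
    num_inference_steps=64,
    frame_size=64,
    output_type="np",
).images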
| 12 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
UpperCAmelCase : int = logging.get_logger(__name__)
UpperCAmelCase : str = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCAmelCase : Optional[Any] = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
UpperCAmelCase : Optional[Any] = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
UpperCAmelCase : Union[str, Any] = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
UpperCAmelCase : str = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
UpperCAmelCase : Union[str, Any] = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
UpperCAmelCase : str = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
UpperCAmelCase : Optional[Any] = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
UpperCAmelCase : Union[str, Any] = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
UpperCAmelCase : Dict = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class __lowerCAmelCase ( UpperCamelCase__):
_lowercase : Dict = VOCAB_FILES_NAMES
_lowercase : List[Any] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_lowercase : Optional[Any] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : str = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
_lowercase : Dict = DPRContextEncoderTokenizer
class __lowerCAmelCase ( UpperCamelCase__):
_lowercase : List[str] = VOCAB_FILES_NAMES
_lowercase : Any = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_lowercase : int = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Dict = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_lowercase : str = DPRQuestionEncoderTokenizer
UpperCAmelCase : List[str] = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
UpperCAmelCase : List[str] = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
UpperCAmelCase : Tuple = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(UpperCamelCase__)
class __lowerCAmelCase :
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , )
elif titles is None or texts is None:
a__ : Optional[Any] =titles if texts is None else texts
return super().__call__(
lowerCAmelCase__ , lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , )
a__ : Union[str, Any] =titles if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else [titles]
a__ : List[str] =texts if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else [texts]
a__ : int =len(lowerCAmelCase__ )
a__ : str =questions if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else [questions] * n_passages
assert len(lowerCAmelCase__ ) == len(
lowerCAmelCase__ ), F'''There should be as many titles as texts but got {len(lowerCAmelCase__ )} titles and {len(lowerCAmelCase__ )} texts.'''
a__ : int =super().__call__(lowerCAmelCase__ , lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ )["input_ids"]
a__ : str =super().__call__(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ )["input_ids"]
a__ : Optional[Any] ={
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
}
if return_attention_mask is not False:
a__ : List[Any] =[]
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
a__ : Tuple =attention_mask
return self.pad(lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 1_6 , lowerCAmelCase__ = 6_4 , lowerCAmelCase__ = 4 , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
a__ : Optional[Any] =reader_input["input_ids"]
a__ , a__ , a__ : Union[str, Any] =reader_output[:3]
a__ : Optional[int] =len(lowerCAmelCase__ )
a__ : Optional[Any] =sorted(range(lowerCAmelCase__ ) , reverse=lowerCAmelCase__ , key=relevance_logits.__getitem__ )
a__ : List[DPRReaderOutput] =[]
for doc_id in sorted_docs:
a__ : List[str] =list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
a__ : Union[str, Any] =sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
a__ : int =sequence_ids.index(self.pad_token_id )
else:
a__ : Optional[Any] =len(lowerCAmelCase__ )
a__ : Any =self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCAmelCase__ , top_spans=lowerCAmelCase__ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCAmelCase__ , start_index=lowerCAmelCase__ , end_index=lowerCAmelCase__ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(lowerCAmelCase__ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
a__ : Optional[int] =[]
for start_index, start_score in enumerate(lowerCAmelCase__ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
a__ : str =sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : x[1] , reverse=lowerCAmelCase__ )
a__ : Any =[]
for (start_index, end_index), score in scores:
assert start_index <= end_index, F'''Wrong span indices: [{start_index}:{end_index}]'''
a__ : List[Any] =end_index - start_index + 1
assert length <= max_answer_length, F'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowerCAmelCase__ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(UpperCamelCase__)
class __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__):
_lowercase : List[Any] = VOCAB_FILES_NAMES
_lowercase : List[str] = READER_PRETRAINED_VOCAB_FILES_MAP
_lowercase : str = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : List[str] = READER_PRETRAINED_INIT_CONFIGURATION
_lowercase : str = ["""input_ids""", """attention_mask"""]
_lowercase : List[str] = DPRReaderTokenizer
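# Hedged usage sketch (upstream this fast reader tokenizer is
# DPRReaderTokenizerFast): encode one question against two passages in the
# [CLS] <question> [SEP] <title> [SEP] <text> layout described in the
# docstring above.
from transformers import DPRReaderTokenizerFast

tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
encoded = tokenizer(
    questions="What is the capital of France?",
    titles=["Paris", "London"],
    texts=["Paris is the capital of France.", "London is the capital of the UK."],
    padding=True,
    return_tensors="pt",
)
print(encoded["input_ids"].shape)  # (n_passages, sequence_length)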
| 95 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ...     image = depth_estimator(image)["depth"]\n ...     image = np.array(image)\n ...     image = image[:, :, None]\n ...     image = np.concatenate([image, image, image], axis=2)\n ...     detected_map = torch.from_numpy(image).float() / 255.0\n ...     hint = detected_map.permute(2, 0, 1)\n ...     return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ...     "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ...     image_embeds=image_emb,\n ...     negative_image_embeds=zero_image_emb,\n ...     hint=hint,\n ...     num_inference_steps=50,\n ...     generator=generator,\n ...     height=768,\n ...     width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    """Compute the latent height/width for a pixel size, rounding up to a multiple of the scale factor."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
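
# Quick sanity check of the rounding behaviour (values chosen for illustration):
# 768 pixels map exactly to a 96-wide latent grid (768 / 8), while 770 is
# rounded up to the next multiple of the scale factor.
assert downscale_height_and_width(768, 768) == (96, 96)
assert downscale_height_and_width(770, 770) == (104, 104)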
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
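
# Classifier-free guidance, as used in the denoising loop above, isolated as a
# minimal sketch. The tensor here is a random stand-in for a real UNet output
# on a [negative, positive] stacked batch; shapes are illustrative only.
def _cfg_sketch(guidance_scale: float = 4.0) -> torch.Tensor:
    noise_pred = torch.randn(2, 4, 96, 96)
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)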
| 12 | 0 |
"""simple docstring"""
def _snake_case ( ):
return [
a * b * (1000 - a - b)
for a in range(1 , 999 )
for b in range(lowercase__ , 999 )
if (a * a + b * b == (1000 - a - b) ** 2)
][0]
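
# The brute force above is O(n^2). A sketch of an O(n) alternative under the
# same constraint a + b + c = 1000: fixing a and solving a^2 + b^2 = c^2 with
# c = 1000 - a - b gives b = (1000^2 - 2 * 1000 * a) / (2 * (1000 - a)).
def solution_linear(total: int = 1000) -> int:
    for a in range(1, total // 3):
        numerator = total * total - 2 * total * a
        denominator = 2 * (total - a)
        if numerator % denominator == 0:
            b = numerator // denominator
            c = total - a - b
            if a * a + b * b == c * c:
                return a * b * c
    raise ValueError("no qualifying triplet found")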
if __name__ == "__main__":
print(F"{solution() = }") | 96 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=2, seq_length=56, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2,
        num_attention_heads=2, intermediate_size=7, hidden_act="gelu_new", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4, attention_type="block_sparse",
        use_bias=True, rescale_embeddings=False, block_size=2, num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = BigBirdConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range, attention_type=self.attention_type,
            block_size=self.block_size, num_random_blocks=self.num_random_blocks, use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
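
# The JIT test above compares jitted vs. eager outputs of the same callable.
# The identical pattern in isolation (illustrative toy function, not a model):
def _jit_vs_eager_sketch():
    @jax.jit
    def f(x):
        return (x * 2).sum()

    x = jax.numpy.arange(4.0)
    with jax.disable_jit():
        eager = f(x)
    assert f(x) == eager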
| 12 | 0 |
"""Prefix sum: O(1) range-sum queries after O(n) preprocessing."""


class PrefixSum:
    def __init__(self, array: list) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of array[start : end + 1].

        >>> PrefixSum([1, 2, 3]).get_sum(0, 2)
        6
        >>> PrefixSum([1, 2, 3]).get_sum(1, 2)
        5
        """
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if some contiguous subarray sums to `target_sum`.

        >>> PrefixSum([1, 2, 3]).contains_sum(6)
        True
        >>> PrefixSum([1, 2, 3]).contains_sum(7)
        False
        """
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
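
# A quick usage demo (array values chosen for illustration); these asserts run
# on import and double as lightweight tests of the class above.
ps = PrefixSum([1, 2, 3, 4])
assert ps.get_sum(0, 3) == 10  # 1 + 2 + 3 + 4
assert ps.get_sum(1, 2) == 5   # 2 + 3
assert ps.contains_sum(7)      # 3 + 4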
if __name__ == "__main__":
import doctest
doctest.testmod() | 97 |
def odd_even_transposition(arr: list) -> list:
    """Sort `arr` in place with odd-even transposition (brick) sort, O(n^2)."""
    arr_size = len(arr)
    for pass_num in range(arr_size):
        for i in range(pass_num % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
| 12 | 0 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 98 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
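
# A hypothetical usage sketch (checkpoint name for illustration only; requires
# network access, hence left as comments):
#
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#     inputs = tokenizer(["who wrote hamlet?"], return_tensors="pt")
#     text = tokenizer.batch_decode(inputs["input_ids"])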
| 12 | 0 |
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """
    >>> generate_pascal_triangle(3)
    [[1], [1, 1], [1, 2, 1]]
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]], current_row: list[int], current_row_idx: int, current_col_idx: int
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """
    >>> generate_pascal_triangle_optimized(3)
    [[1], [1, 1], [1, 2, 1]]
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
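
# Quick correctness check (standard Pascal's triangle rows), verifying that the
# optimized half-row construction agrees with the straightforward builder:
assert generate_pascal_triangle(4) == [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]
assert generate_pascal_triangle_optimized(4) == generate_pascal_triangle(4)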
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 99 |
import json
import os
from datetime import date
from pathlib import Path

from tabulate import DataRow, TableFormat, tabulate

hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f"{line['duration']:.4f}"
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
message = ""
all_files2failed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            files2failed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in files2failed:
                    files2failed[data[0]] = [data[1:]]
                else:
                    files2failed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(files2failed[file])])
            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_files2failed.append(files2failed)
    if len(message) > 3_000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3_000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
    date_report = {
        "type": "context",
        "elements": [
            {
                "type": "plain_text",
                "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_files2failed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""
            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
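
# For reference, the custom `hf_table_format` defined at the top renders rows
# pipe-delimited with no horizontal rules, roughly like this (illustrative,
# not captured output):
#
#     | Test Location   |   Num Failed |
#     | test_foo.py     |            2 |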
| 12 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
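
# A minimal standalone sketch of the same lazy-import idea using a module-level
# __getattr__ (PEP 562); illustrative only, not the transformers implementation
# (left as comments so it does not shadow the _LazyModule machinery above):
#
#     import importlib
#
#     _LAZY = {"TrOCRProcessor": ".processing_trocr"}
#
#     def __getattr__(name):
#         if name in _LAZY:
#             module = importlib.import_module(_LAZY[name], __name__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")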
| 100 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()
        self.learnable = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None
        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()
        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """
        Truncate `log_p_x_0` so the lowest-probability classes whose cumulative probability exceeds
        `truncation_rate` are set to zero probability (i.e. -inf in log space).
        """
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
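
# The truncation above keeps, per latent position, only the most probable
# codebook entries whose cumulative probability stays under the truncation
# rate, zeroing out the rest in log space. A minimal sketch on a single 3-way
# distribution (made-up probabilities; a rate of 0.8 keeps the top two):
def _truncate_sketch() -> torch.Tensor:
    log_p = torch.log(torch.tensor([[0.6, 0.3, 0.1]]))
    sorted_p, indices = torch.sort(log_p.exp(), dim=1, descending=True)
    keep = torch.cat(
        [torch.ones_like(sorted_p[:, :1], dtype=torch.bool), sorted_p.cumsum(1) < 0.8], dim=1
    )[:, :-1]
    keep = keep.gather(1, indices.argsort(1))
    truncated = log_p.clone()
    truncated[~keep] = -torch.inf  # probability zero
    return truncated  # [[log 0.6, log 0.3, -inf]]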
| 12 | 0 |